diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/README.md b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/README.md deleted file mode 100644 index 73e7fa09f1502c9a79f5324cabb51128cad13fbc..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# gpt4free package - -### What is it? - -gpt4free is a python package that provides some language model api's - -### Main Features - -- It's free to use -- Easy access - -### Installation: - -```bash -pip install gpt4free -``` - -#### Usage: - -```python -import gpt4free -from gpt4free import Provider, quora, forefront - -# usage You -response = gpt4free.Completion.create(Provider.You, prompt='Write a poem on Lionel Messi') -print(response) - -# usage Poe -token = quora.Account.create(logging=False) -response = gpt4free.Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token=token, model='ChatGPT') -print(response) - -# usage forefront -token = forefront.Account.create(logging=False) -response = gpt4free.Completion.create( - Provider.ForeFront, prompt='Write a poem on Lionel Messi', model='gpt-4', token=token -) -print(response) -print(f'END') - -# usage theb -response = gpt4free.Completion.create(Provider.Theb, prompt='Write a poem on Lionel Messi') -print(response) - - -``` - -### Invocation Arguments - -`gpt4free.Completion.create()` method has two required arguments - -1. Provider: This is an enum representing different provider -2. prompt: This is the user input - -#### Keyword Arguments - -Some of the keyword arguments are optional, while others are required. - -- You: - - `safe_search`: boolean - default value is `False` - - `include_links`: boolean - default value is `False` - - `detailed`: boolean - default value is `False` -- Quora: - - `token`: str - this needs to be provided by the user - - `model`: str - default value is `gpt-4`. - - (Available models: `['Sage', 'GPT-4', 'Claude+', 'Claude-instant', 'ChatGPT', 'Dragonfly', 'NeevaAI']`) -- ForeFront: - - `token`: str - this need to be provided by the user - -- Theb: - (no keyword arguments required) - -#### Token generation of quora -```python -from gpt4free import quora - -token = quora.Account.create(logging=False) -``` - -### Token generation of ForeFront -```python -from gpt4free import forefront - -token = forefront.Account.create(logging=False) -``` - -## Copyright: - -This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt) - -### Copyright Notice: - -``` -xtekky/gpt4free: multiple reverse engineered language-model api's to decentralise the ai industry. -Copyright (C) 2023 xtekky - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see . 
-``` diff --git a/spaces/101-5/gpt4free/testing/binghuan/testing.py b/spaces/101-5/gpt4free/testing/binghuan/testing.py deleted file mode 100644 index 2db0b427a0aa6b78a5ffae2f3b0204325b022232..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/testing/binghuan/testing.py +++ /dev/null @@ -1,31 +0,0 @@ -from BingHuan import ChatCompletion - -# Test 1 -response = ChatCompletion.create(model="gpt-3.5-turbo", - provider="BingHuan", - stream=False, - messages=[{'role': 'user', 'content': 'who are you?'}]) - -print(response) - -# Test 2 -# this prompt will return emoji in end of response -response = ChatCompletion.create(model="gpt-3.5-turbo", - provider="BingHuan", - stream=False, - messages=[{'role': 'user', 'content': 'what you can do?'}]) - -print(response) - - -# Test 3 -response = ChatCompletion.create(model="gpt-4", - provider="BingHuan", - stream=False, - messages=[ - {'role': 'user', 'content': 'now your name is Bob'}, - {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'}, - {'role': 'user', 'content': 'what your name again?'}, - ]) - -print(response) \ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Adobe After Effects Cc 2014 Crack Amtlib.dll.md b/spaces/1gistliPinn/ChatGPT4/Examples/Adobe After Effects Cc 2014 Crack Amtlib.dll.md deleted file mode 100644 index 0213bde1355d998c3a25b284191f9366716da902..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Adobe After Effects Cc 2014 Crack Amtlib.dll.md +++ /dev/null @@ -1,38 +0,0 @@ -

adobe after effects cc 2014 crack amtlib.dll


DOWNLOADhttps://imgfil.com/2uy1W2



-
-not found - -I'm running Adobe After Effects CC 2014 on a windows 7 system 64bit. - -I'm trying to add a CS6 project to this installation. It is running fine but everytime I try to add a css file or stylesheet it fails saying amtlib.dll was not found. I am running the 64bit OS. I've looked through other threads here and I've tried to: - -Add the libraries to the Adobe directory located in C:\Program Files\Adobe\Adobe After Effects CC 2014 - -Create a symbolic link pointing to C:\Program Files\Adobe\Adobe After Effects CC 2014\amtlib.dll - -Restart computer - -Nothing seems to work. Any thoughts? Any further help is appreciated. Thank you. - -A: - -In my case Adobe added the dll in the wrong folder. Where it was pointing to is the Adobe Shared\amtlib.dll, if you delete this folder and open the installation folder and make the symbolic link again, it will work. - -Pages - -Thursday, May 14, 2012 - -Thursday Thirteen - Next chapter! - -And that is the end of this story. It's been a good ride, but I think it's time for me to move on to other projects. But, what projects? - -Next story is going to be written by my buddy Gary Marti. Gary lives about thirty-five miles away from me in a little city in Texas named Oasis. He and I went to school together (seven years) and have been friends since. His wife, Kari, and I have been friends as well. - -While I've known Gary for many years, I'm really looking forward to sharing a great friendship with him. Gary and I have been discussing a story and I'm excited that he's going to write it for me. I'm even more excited that I can write along side Gary and we'll take turns with each chapter. Gary has been taking his time in working on the chapter, so he doesn't have any chapters in writing yet. - -I'm not telling you anything about this story except for the fact that it will involve a sports team and a man that will determine the fate of the team. And, just as important, he will determine the fate of the man. - -Right now, I'm thinking of some of my writing projects and decided that I'm going to write a short story about 4fefd39f24
-
-
-

diff --git a/spaces/1line/AutoGPT/autogpt/agent/agent_manager.py b/spaces/1line/AutoGPT/autogpt/agent/agent_manager.py deleted file mode 100644 index 898767a485e50b5e62625a7883edf1b30d5fddf9..0000000000000000000000000000000000000000 --- a/spaces/1line/AutoGPT/autogpt/agent/agent_manager.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Agent manager for managing GPT agents""" -from __future__ import annotations - -from typing import Union - -from autogpt.config.config import Singleton -from autogpt.llm_utils import create_chat_completion - - -class AgentManager(metaclass=Singleton): - """Agent manager for managing GPT agents""" - - def __init__(self): - self.next_key = 0 - self.agents = {} # key, (task, full_message_history, model) - - # Create new GPT agent - # TODO: Centralise use of create_chat_completion() to globally enforce token limit - - def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]: - """Create a new agent and return its key - - Args: - task: The task to perform - prompt: The prompt to use - model: The model to use - - Returns: - The key of the new agent - """ - messages = [ - {"role": "user", "content": prompt}, - ] - - # Start GPT instance - agent_reply = create_chat_completion( - model=model, - messages=messages, - ) - - # Update full message history - messages.append({"role": "assistant", "content": agent_reply}) - - key = self.next_key - # This is done instead of len(agents) to make keys unique even if agents - # are deleted - self.next_key += 1 - - self.agents[key] = (task, messages, model) - - return key, agent_reply - - def message_agent(self, key: str | int, message: str) -> str: - """Send a message to an agent and return its response - - Args: - key: The key of the agent to message - message: The message to send to the agent - - Returns: - The agent's response - """ - task, messages, model = self.agents[int(key)] - - # Add user message to message history before sending to agent - messages.append({"role": "user", "content": message}) - - # Start GPT instance - agent_reply = create_chat_completion( - model=model, - messages=messages, - ) - - # Update full message history - messages.append({"role": "assistant", "content": agent_reply}) - - return agent_reply - - def list_agents(self) -> list[tuple[str | int, str]]: - """Return a list of all agents - - Returns: - A list of tuples of the form (key, task) - """ - - # Return a list of agent keys and their tasks - return [(key, task) for key, (task, _, _) in self.agents.items()] - - def delete_agent(self, key: Union[str, int]) -> bool: - """Delete an agent from the agent manager - - Args: - key: The key of the agent to delete - - Returns: - True if successful, False otherwise - """ - - try: - del self.agents[int(key)] - return True - except KeyError: - return False diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/COD Warzone Vondel Map High Stakes Event and More - Download Today.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/COD Warzone Vondel Map High Stakes Event and More - Download Today.md deleted file mode 100644 index 3d6f653e7661b119fdcb1d1ff77b3a5a19022f14..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/COD Warzone Vondel Map High Stakes Event and More - Download Today.md +++ /dev/null @@ -1,117 +0,0 @@ -
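For reference, here is a minimal usage sketch of the `AgentManager` class removed in the `autogpt/agent/agent_manager.py` diff above. It is only an illustration: the task label, prompts, and model name are made up, and `create_chat_completion` assumes the surrounding AutoGPT project is installed and configured with a valid API key.

```python
# Hypothetical usage of the deleted AgentManager class shown above.
# Assumes the AutoGPT package is installed and an OpenAI API key is configured,
# since create_chat_completion() performs a real chat-completion request.
from autogpt.agent.agent_manager import AgentManager

manager = AgentManager()  # Singleton: every call returns the same instance

# Spawn a sub-agent; returns its key and the model's first reply
key, reply = manager.create_agent(
    task="summarize-article",                   # illustrative task label
    prompt="Summarize the following text: ...",
    model="gpt-3.5-turbo",                      # illustrative model name
)
print(reply)

# Continue the same conversation; history is kept per agent key
print(manager.message_agent(key, "Now shorten that to one sentence."))

# List active agents as (key, task) pairs, then clean up
print(manager.list_agents())        # e.g. [(0, "summarize-article")]
print(manager.delete_agent(key))    # True if the key existed
```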
-

How to Download and Play COD Warzone: A Complete Guide

-

If you are looking for a thrilling and action-packed battle royale game, you might want to check out COD Warzone. This free-to-play game is set in the Modern Warfare universe and offers a variety of modes, features, and challenges to keep you entertained. In this guide, we will show you how to download and play COD Warzone on PC, PS4, and Xbox One, as well as give you some tips and tricks to help you win.

-

cod warzone download


Download Filehttps://urlin.us/2uT1zl



-

What is COD Warzone?

-

A free-to-play battle royale game set in the Modern Warfare universe

-

COD Warzone is a spin-off of the popular Call of Duty franchise, developed by Infinity Ward and Raven Software. It was released in March 2020 as a standalone game that does not require any previous Call of Duty titles to play. It is also cross-platform, meaning that you can play with your friends regardless of what device they are using.

-

COD Warzone is set in Verdansk, a fictional city inspired by real-world locations in Eastern Europe. The game features over 300 points of interest, multiple named zones, and distinct landmarks to explore. The map is constantly evolving with new updates, events, and seasons that introduce new content and changes.

-

The main features and modes of COD Warzone

-

Battle Royale: Survive against up to 150 players in a shrinking map

-

The core mode of COD Warzone is Battle Royale, where you can play solo or in teams of two, three, or four. Your goal is to be the last one standing out of up to 150 players who parachute into the map. You have to scavenge for weapons, equipment, cash, and contracts that give you objectives and rewards. You also have to avoid the gas that closes in on the map over time, forcing you to move to safer zones.

-

One of the unique features of COD Warzone's Battle Royale is the Gulag. When you die for the first time in a match, you are sent to the Gulag, where you have a chance to fight another fallen player in a 1v1 match. The winner gets to redeploy back into the game, while the loser is eliminated. You can also be revived by your teammates or buy back your teammates at Buy Stations if they have enough cash.

-

Plunder: Collect cash and loot in a race to reach $1 million

-

If you prefer a more casual and less stressful mode, you can try Plunder. In this mode, you can play in teams of two, three, or four, and your goal is to collect as much cash as possible by looting, completing contracts, killing enemies, or depositing at helipads or balloons. The first team to reach $1 million triggers overtime

where the cash values are doubled and the team with the most cash at the end wins. You can respawn unlimited times in this mode, but you lose some of your cash when you die. You can also loot cash from other players or steal their deposits.

-

Strongholds: Raid AI-protected buildings for high-tier loot and rewards

-

A new mode that was added in Season 6 of COD Warzone is Strongholds. In this mode, you can play in teams of two, three, or four, and your goal is to raid buildings that are guarded by AI enemies. These buildings contain high-tier loot, such as legendary weapons, killstreaks, and armor satchels. You also get rewards for clearing each floor and reaching the rooftop, where you can find a helicopter that will take you to the next stronghold.

-

cod warzone download size
-cod warzone download pc free
-cod warzone download ps4
-cod warzone download xbox one
-cod warzone download time
-cod warzone download error
-cod warzone download stuck
-cod warzone download slow
-cod warzone download season 4
-cod warzone download not working
-cod warzone download update
-cod warzone download link
-cod warzone download requirements
-cod warzone download mac
-cod warzone download steam
-cod warzone download speed
-cod warzone download without modern warfare
-cod warzone download problem
-cod warzone download vondel map
-cod warzone download free to play
-cod warzone download latest version
-cod warzone download offline
-cod warzone download for android
-cod warzone download for windows 10
-cod warzone download for laptop
-cod warzone download from activision website[^1^]
-cod warzone download from playstation store[^2^]
-cod warzone download from xbox store[^2^]
-cod warzone download from battle.net[^2^]
-cod warzone download from steam[^2^]
-cod warzone download high stakes event[^1^]
-cod warzone download tactical amphibious vehicle[^1^]
-cod warzone download ricochet anti-cheat[^1^]
-cod warzone download blackcell sector[^1^]
-cod warzone download tips and tricks[^3^]
-cod warzone download best settings[^3^]
-cod warzone download best weapons[^3^]
-cod warzone download best loadouts[^3^]
-cod warzone download best operators[^3^]
-cod warzone download best perks[^3^]

-

However, you are not alone in this mode. Other teams can also enter the same stronghold and compete with you for the loot and rewards. You can also encounter other teams on your way to the next stronghold or at the extraction point. You have to balance between speed and stealth, as well as teamwork and strategy, to survive and win this mode.

-

Black Sites: Explore mysterious locations for secrets and surprises

-

Another new feature that was added in Season 6 of COD Warzone is Black Sites. These are hidden locations that are scattered around the map and can only be accessed by finding and activating red access cards. These cards can be found by looting crates, completing contracts, or killing enemies. Once you activate a card, you can enter a black site and explore its secrets and surprises.

-

Black sites contain rare loot, such as specialist tokens, juggernaut suits, advanced UAVs, and self-revive kits. They also have clues and hints about the lore and story of COD Warzone, as well as Easter eggs and puzzles that can unlock rewards or trigger events. Some black sites are more dangerous than others, as they may have traps, alarms, or enemies waiting for you. You also have to watch out for other players who may follow you or ambush you at the black sites.

-

How to download COD Warzone on PC, PS4, and Xbox One

-

PC: Download the Battle.net launcher and install the game

-

If you want to play COD Warzone on PC, you need to download the Battle.net launcher from the official website of Blizzard Entertainment. This is a free platform that allows you to access and play games developed by Blizzard or its partners, such as COD Warzone. Once you download and install the launcher, you need to create an account or log in with an existing one.

-

After that, you can find COD Warzone in the Games tab of the launcher. You can click on it and then click on Install to start downloading the game. The game size is about 100 GB, so make sure you have enough space and a stable internet connection. You can also adjust the download settings and preferences in the launcher.

-

The system requirements for PC

-

Before you download COD Warzone on PC, you should check if your system meets the minimum or recommended requirements for the game. Here are the system requirements according to the official website of COD Warzone:

| Minimum | Recommended |
| --- | --- |
| OS: Windows 7 64-Bit (SP1) or Windows 10 64-Bit | OS: Windows 10 64-Bit (latest update) |
| CPU: Intel Core i3-4340 or AMD FX-6300 | CPU: Intel Core i5-2500K or AMD Ryzen R5 1600X |
| RAM: 8 GB | RAM: 12 GB |
| GPU: NVIDIA GeForce GTX 670 / NVIDIA GeForce GTX 1650 or AMD Radeon HD 7950 | GPU: NVIDIA GeForce GTX 970 / NVIDIA GeForce GTX 1660 or AMD Radeon R9 390 / AMD Radeon RX 580 |
| HDD: 100 GB | HDD: 100 GB |
| DirectX: Version 12 | DirectX: Version 12 |

PS4: Download the game from the PlayStation Store

-

If you want to play COD Warzone on PS4, you need to download the game from the PlayStation Store. You can access the store from your PS4 console or from a web browser on your PC or mobile device. You need to have a PlayStation Network account to access the store and download the game.

-

Once you find COD Warzone in the store, you can click on Download to start downloading the game. The game size is about 100 GB, so make sure you have enough space and a stable internet connection. You can also check the download progress and status in your Notifications menu on your PS4 console.

-

-

The storage space and online subscription required for PS4

As mentioned, you need to have at least 100 GB of free space on your PS4 console to download and install COD Warzone. You can check your available space in the Settings menu of your console. You can also delete or move some files or games to free up some space if needed.

-

Another thing you need to play COD Warzone on PS4 is an online subscription. You need to have a PlayStation Plus membership to play online multiplayer games on PS4. This is a paid service that gives you access to online gaming, free monthly games, exclusive discounts, and more. You can buy a PlayStation Plus membership from the PlayStation Store or from a retailer. You can choose from different plans, such as monthly, quarterly, or yearly.

-

Xbox One: Download the game from the Microsoft Store

-

If you want to play COD Warzone on Xbox One, you need to download the game from the Microsoft Store. You can access the store from your Xbox One console or from a web browser on your PC or mobile device. You need to have a Microsoft account to access the store and download the game.

-

Once you find COD Warzone in the store, you can click on Get to start downloading the game. The game size is about 100 GB, so make sure you have enough space and a stable internet connection. You can also check the download progress and status in your Queue menu on your Xbox One console.

-

The storage space and online subscription required for Xbox One

-

As mentioned, you need to have at least 100 GB of free space on your Xbox One console to download and install COD Warzone. You can check your available space in the Settings menu of your console. You can also delete or move some files or games to free up some space if needed.

-

Another thing you need to play COD Warzone on Xbox One is an online subscription. You need to have an Xbox Live Gold membership to play online multiplayer games on Xbox One. This is a paid service that gives you access to online gaming, free monthly games, exclusive discounts, and more. You can buy an Xbox Live Gold membership from the Microsoft Store or from a retailer. You can choose from different plans, such as monthly, quarterly, or yearly.

-

How to play COD Warzone: Tips and tricks for beginners

-

Prioritize getting your loadout and armor satchel

-

One of the most important things to do in COD Warzone is to get your loadout and armor satchel as soon as possible. Your loadout is a custom set of weapons, perks, and equipment that you can create in the main menu of the game. You can access your loadout in a match by buying a loadout drop at a Buy Station for $10,000 or by finding one that drops randomly on the map.

-

Your loadout allows you to use your preferred weapons and perks that suit your playstyle and strategy. For example, you can use a sniper rifle and a ghost perk if you want to be stealthy and snipe enemies from afar, or you can use a shotgun and an overkill perk if you want to rush enemies and deal high damage up close.

-

Your armor satchel is an item that allows you to carry up to eight armor plates instead of five. Armor plates are essential for surviving in COD Warzone, as they give you extra health and protection from enemy fire. You can find armor plates by looting crates, enemies, or Buy Stations. You can also find armor satchels by looting legendary crates, enemies, or Buy Stations.

-

Communicate and use the ping system with your teammates

-

Another important thing to do in COD Warzone is to communicate and use the ping system with your teammates. Communication is key for teamwork and coordination in any multiplayer game, especially in a battle royale game where you have to work together to survive and win. You can communicate with your teammates by using voice chat or text chat in the game.

-

The ping system is a feature that allows you to mark locations, enemies, items, or other points of interest on the map or on your screen for your teammates to see. You can use the ping system by pressing the D-pad on your controller or the left alt key on your keyboard. You can also use different types of pings by holding down the ping button and selecting an option from the wheel menu.

-

The ping system is very useful for sharing information and giving commands without using voice chat or text chat. For example, you can ping an enemy location to warn your teammates of danger, ping a loot crate to tell your teammates where to find items, ping a Buy Station to suggest buying something, or ping a location to tell your teammates where to go or regroup.

-

Keep an eye on the map and the circle movements

-

A third important thing to do in COD Warzone is to keep an eye on the map and the circle movements. The map is your best friend in a battle royale game, as it shows you where you are, where your teammates are, where your enemies are, where the loot is, where the contracts are, where the Buy Stations are, and more. You can access the map by pressing the touchpad on your controller or the M key on your keyboard.

-

The circle movements are the mechanism that forces you and your enemies to move closer together as the match progresses. The circle is a safe zone that shrinks over time, and anyone who is outside of it will take damage from the gas. The circle movements are shown on the map as white and yellow lines, and you can also see a timer that tells you when the next circle will start moving.

-

You should always be aware of where the circle is and where it is going, as well as plan your route and position accordingly. You don't want to be caught in the gas or in a bad spot when the circle closes in. You also want to avoid being in the open or in a crowded area where you can be easily spotted or ambushed by enemies.

-

Visit strongholds and black sites for better loot and challenges

-

A fourth important thing to do in COD Warzone is to visit strongholds and black sites for better loot and challenges. As we mentioned earlier, these are new features that were added in Season 6 of COD Warzone, and they offer a lot of benefits and risks for players who dare to explore them.

-

Strongholds are buildings that are guarded by AI enemies, and they contain high-tier loot and rewards. You can find strongholds by looking for red icons on the map or on your screen. You can enter a stronghold by finding a keypad and entering a code that you can get from crates, contracts, or enemies. You can then clear each floor of the stronghold and reach the rooftop, where you can find a helicopter that will take you to the next stronghold.

-

Black sites are hidden locations that can only be accessed by finding and activating red access cards. These cards can be found by looting crates, contracts, or enemies. You can then use a card to open a door or an elevator that will take you to a black site. Black sites contain rare loot, clues, Easter eggs, puzzles, and events.

-

Both strongholds and black sites are great places to find better loot and challenges, but they also come with risks. You have to fight against AI enemies or other players who may enter the same location. You also have to manage your time and resources, as you may miss out on other opportunities or get caught by the circle if you spend too much time in these locations.

-

Play to your strengths and use cover wisely

-

A fifth important thing to do in COD Warzone is to play to your strengths and use cover wisely. COD Warzone is a game that rewards skill, strategy, and creativity, but it also punishes mistakes, carelessness, and recklessness. You have to know your strengths and weaknesses as a player, as well as your weapons and equipment.

-

You should play to your strengths and use weapons and equipment that suit your playstyle and strategy. For example, if you are good at sniping, you should use a sniper rifle and a scope that allow you to hit long-range shots. If you are good at rushing, you should use a shotgun or an SMG that allow you to deal high damage up close.

-

You should also use cover wisely and avoid exposing yourself unnecessarily. Cover is anything that can protect you from enemy fire, such as walls, buildings, rocks, trees, vehicles, etc. You should always move from cover to cover and avoid running in the open or standing still for too long. You should also use different types of cover depending on the situation. For example, if you are being sniped from afar, you should use hard cover that blocks bullets completely. If you are being rushed by enemies nearby , you should use soft cover that allows you to peek and shoot quickly.

-

Conclusion

-

COD Warzone is a fun and exciting battle royale game that offers a lot of variety, content, and challenges for players of all skill levels. Whether you want to play solo or with your friends, you can enjoy the different modes, features, and events that COD Warzone has to offer. You can also customize your loadout, explore the map, and discover secrets and surprises along the way.

-

To play COD Warzone, you need to download the game from the appropriate store depending on your device. You also need to have enough space and a stable internet connection. You may also need to have an online subscription if you are playing on PS4 or Xbox One. You can then start playing the game and follow the tips and tricks we have shared in this guide to help you win.

-

We hope you found this guide helpful and informative. If you have any questions or feedback, please let us know in the comments below. Thank you for reading and happy gaming!

-

FAQs

-

Q: How much does COD Warzone cost?

-

A: COD Warzone is a free-to-play game that does not require any previous Call of Duty titles to play. However, you may need to pay for an online subscription if you are playing on PS4 or Xbox One.

-

Q: How often does COD Warzone update?

-

A: COD Warzone updates regularly with new seasons, events, and patches that introduce new content and changes. Each season lasts for about two months and has its own theme, story, and rewards. Each event lasts for a limited time and has its own objectives, challenges, and rewards. Each patch fixes bugs, balances gameplay, and improves performance.

-

Q: How many players can play COD Warzone?

-

A: COD Warzone supports up to 150 players in a match, depending on the mode and settings. You can play solo or in teams of two, three, or four.

-

Q: How do I get better at COD Warzone?

-

A: The best way to get better at COD Warzone is to practice and learn from your mistakes. You can also watch tutorials, guides, and streams from other players who are more experienced or skilled than you. You can also try different weapons, perks, and strategies to find what works best for you.

-

Q: Is COD Warzone cross-platform?

-

A: Yes, COD Warzone is cross-platform, meaning that you can play with your friends regardless of what device they are using. You can also enable or disable cross-play in the settings menu of the game.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Ludo Nasa Now and Enjoy the Best Mobile Game of the Year.md b/spaces/1phancelerku/anime-remove-background/Download Ludo Nasa Now and Enjoy the Best Mobile Game of the Year.md deleted file mode 100644 index aab6644f717b072560f728c31b5480d7c3843624..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Ludo Nasa Now and Enjoy the Best Mobile Game of the Year.md +++ /dev/null @@ -1,132 +0,0 @@ -
-

Ludo Nasa Download: A Guide to the Most Popular Game of the Year

-

Ludo nasa is a free-to-play mobile game application that has taken the world by storm. It is a modern version of the classic board game ludo, which is derived from the ancient Indian game of Pachisi. Ludo nasa offers a variety of features and themes that make it more fun and engaging than ever. Whether you want to play with your family and friends, or challenge players from around the world, ludo nasa has something for everyone. In this article, we will tell you everything you need to know about ludo nasa download, including its history, features, and benefits.

-

History of ludo game

-

Ludo game has a long and rich history that dates back to the 6th century CE in India. It is believed that the game was created by the Indian maharajas, who played it on a board made of cloth or slate, using seeds, shells, or dice as tokens. The original version of the game was called Chaupar, and it was also described in the Indian epic Mahabharata, where it was used as a tool for gambling and deception. The game was later modified by the Mughal emperors, such as Akbar, who played it with real people as tokens on a life-sized board. The game was also known as Pachisi, which means twenty-five in Hindi, referring to the highest score possible in the game.

-

ludo nasa download


Download --->>> https://jinyurl.com/2uNRSm



-

The game spread to other countries and regions through trade and colonization, and acquired different names and variations. For example, in Spain, it was called Parcheesi; in China, it was called Chatush pada; and in Africa, it was called Ludu. The game reached England in the 19th century, where it was patented as Ludo by Alfred Collier in 1896. Ludo means "I play" in Latin, and it became a popular board game for children and adults alike. Ludo also inspired other games, such as Uckers, which was played by the Royal Navy.

-

Features of ludo game

-

Ludo game is a simple yet strategic board game that can be played by two to four players. The objective of the game is to move four tokens of the same color from the starting point to the finishing point on the board, according to the rolls of a single die. The first player to do so wins the game. However, there are some challenges and twists along the way, such as:

- -

Ludo game can be played in different modes and themes, depending on the preference of the players. Some of the common modes and themes are:

- -

Ludo game also has some social benefits that make it more enjoyable and rewarding for the players. Some of these benefits are:

- -

Ludo nasa download

-

Ludo nasa is one of the most popular and downloaded versions of ludo game in the market. It has over 100 million downloads on Google Play Store and over 10 million downloads on App Store. It is compatible with Android and iOS devices, as well as Windows PC and Mac. To download and play ludo nasa on your device, you can follow these simple steps:

-

For Android devices

-
    -
1. Go to Google Play Store and search for ludo nasa.
2. Select the app from the list and tap on Install.
3. Wait for the app to download and install on your device.
4. Open the app and enjoy playing ludo nasa with your friends or online players.
-

For iOS devices

-
    -
1. Go to App Store and search for ludo nasa.
2. Select the app from the list and tap on Get.
3. Enter your Apple ID password or use Touch ID or Face ID to confirm.
4. Wait for the app to download and install on your device.
5. Open the app and enjoy playing ludo nasa with your friends or online players.
-

For Windows PC or Mac

-
    -
1. Go to https://ludonasa.com/ and click on Download for PC or Download for Mac.
2. Select the version that matches your operating system and click on Download Now.
3. Wait for the file to download on your computer.
4. Open the file and follow the instructions to install ludo nasa on your computer.
5. Launch ludo nasa from your desktop or start menu and enjoy playing ludo nasa with your friends or online players.
-

Conclusion

-

Ludo nasa is a fun and exciting game that you can play anytime, anywhere, with anyone. It is based on the classic board game ludo, which has a long and rich history in India and other countries. Ludo nasa offers a variety of features and themes that make it more appealing and engaging than ever. It also has some social benefits that improve your cognitive, communication, and emotional skills. If you are looking for a game that can entertain you, challenge you, and connect you with others, then you should definitely try ludo nasa download. Here are some tips and tricks that can help you win more games:

-

ludo nasa game download
-ludo nasa app download
-ludo nasa apk download
-ludo nasa online game download
-ludo nasa play and win money download
-ludo nasa free download
-ludo nasa download for pc
-ludo nasa download for android
-ludo nasa download for ios
-ludo nasa download link
-ludo nasa latest version download
-ludo nasa mod apk download
-ludo nasa hack apk download
-ludo nasa unlimited money download
-ludo nasa real money game download
-ludo nasa best ludo game download
-ludo nasa india gaming awards 2023 winner download
-ludo nasa quick mode game download
-ludo nasa entertainment game download
-ludo nasa board game download
-how to download ludo nasa game
-where to download ludo nasa game
-why to download ludo nasa game
-what is ludo nasa game download
-when to download ludo nasa game
-ludonasa.com game download
-ludonasa.com app download
-ludonasa.com apk download
-ludonasa.com online game download
-ludonasa.com play and win money download
-ludonasa.com free download
-ludonasa.com download for pc
-ludonasa.com download for android
-ludonasa.com download for ios
-ludonasa.com download link
-ludonasa.com latest version download
-ludonasa.com mod apk download
-ludonasa.com hack apk download
-ludonasa.com unlimited money download
-ludonasa.com real money game download
-ludonasa.com best ludo game download
-ludonasa.com india gaming awards 2023 winner download
-ludonasa.com quick mode game download
-ludonasa.com entertainment game download
-ludonasa.com board game download
-how to play ludo nasa after downloading it
-how to win money on ludo nasa after downloading it
-how to invite friends on ludo nasa after downloading it
-how to update ludo nasa after downloading it
-how to contact support on ludo nasa after downloading it

- -

We hope that this article has given you some useful information about ludo nasa download. If you have any questions or feedback about ludo nasa, feel free to share them with us in the comments section below. Thank you for reading our article. We hope that you have learned something new and interesting about ludo nasa download. Before we end, we would like to answer some of the frequently asked questions that you might have about ludo nasa. Here are the top five FAQs that we have selected for you:

FAQs

-
    -
1. What is the difference between ludo nasa and ludo king?

    Ludo nasa and ludo king are both popular versions of ludo game, but they have some differences in terms of features and themes. Ludo nasa has more themes than ludo king, such as nature, Egypt, disco, and NASA. Ludo nasa also has more modes than ludo king, such as vs computer, local mode, online multiplayer, and private multiplayer. Ludo nasa also has a better user interface and graphics than ludo king.

    -
2. How can I play ludo nasa with voice chat?

    Ludo nasa has a voice chat feature that allows you to communicate with your friends or online players while playing the game. To use this feature, you need to enable the microphone permission on your device and join a private room with your friends or online players. Then, you can tap on the microphone icon on the top right corner of the screen to start or stop the voice chat.

    -
3. How can I earn coins and gems in ludo nasa?

    Coins and gems are the in-game currencies that you can use to buy different themes and items in ludo nasa. You can earn coins and gems by playing and winning games, completing daily tasks, watching ads, spinning the wheel, or inviting your friends to play the game. You can also buy coins and gems with real money if you want to.

    -
4. How can I update ludo nasa to the latest version?

    Ludo nasa is constantly updated with new features and improvements to enhance your gaming experience. To update ludo nasa to the latest version, you need to go to Google Play Store or App Store and check if there is any update available for the app. If there is, you can tap on Update and wait for the app to download and install on your device.

    -
5. How can I contact the customer support of ludo nasa?

    If you have any issues or queries about ludo nasa, you can contact the customer support of ludo nasa by sending an email to ludonasa@gmail.com or by filling out the feedback form on their website https://ludonasa.com/. They will try to respond to your message as soon as possible.

    -

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download QS Ar-Rahman - The Surah that Will Make You Cry.md b/spaces/1phancelerku/anime-remove-background/Download QS Ar-Rahman - The Surah that Will Make You Cry.md deleted file mode 100644 index 16416a8580589e88c6d2eeee5e136adfb6f79118..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download QS Ar-Rahman - The Surah that Will Make You Cry.md +++ /dev/null @@ -1,174 +0,0 @@ - -

Download QS Ar Rahman: How to Listen to the Beautiful Surah Online

-

QS Ar Rahman is one of the most beautiful and powerful surahs in the Quran. It is also known as "The Beneficent" or "The Most Merciful" because it begins with the name of Allah, the Most Compassionate. In this article, we will explore what QS Ar Rahman is, why it is important, and how you can download it in different formats and languages. We will also share some tips on how to benefit from listening to or reading this surah.

-

What is QS Ar Rahman and Why is it Important?

-

QS Ar Rahman is the 55th surah in the Quran, consisting of 78 verses. It was revealed in Medina, after the migration of the Prophet Muhammad (peace be upon him) and his companions from Mecca. It is one of the surahs that begins with one of the names of Allah, which is a rare feature in the Quran. It is also one of the surahs that has a refrain or chorus, which is repeated 31 times throughout the surah: "Maka, nikmat Tuhanmu manakah yang kamu dustakan (wahai jin dan manusia)?" This means "Then which of the favors of your Lord will you deny (O jinn and mankind)?"

-

download qs ar rahman


Download Ziphttps://jinyurl.com/2uNIib



-

QS Ar Rahman is important because it reminds us of the countless blessings and favors that Allah has bestowed upon us, both in this world and the hereafter. It also invites us to reflect on the signs of Allah's power and wisdom in His creation, such as the sun, the moon, the stars, the plants, the animals, the seas, and the human beings. It also warns us of the consequences of denying or rejecting Allah's favors, such as the punishment of hellfire or the deprivation of paradise. It also encourages us to be grateful, humble, and obedient to Allah, who is the Most Merciful and the Most Generous.

-

The Meaning and Benefits of QS Ar Rahman

-

The meaning of QS Ar Rahman is derived from its first verse, which states: "Ar-Rahman (The Most Compassionate)". This is one of the names of Allah, which describes His attribute of being infinitely kind, loving, caring, and forgiving to His creation. He is also Ar-Raheem (The Most Merciful), which means He bestows His mercy upon those who believe in Him and do good deeds. He is also Al-Wadud (The Most Loving), which means He loves those who love Him and follow His guidance.

-

The benefits of QS Ar Rahman are many, as it contains verses that praise Allah's greatness, glorify His majesty, describe His favors, warn against His wrath, promise His reward, and invite to His worship. Some of the benefits are:

- -

The Occasion and Context of Revelation of QS Ar Rahman

-

The occasion and context of revelation of QS Ar Rahman are related to the events that took place in Medina, after the migration of the Prophet Muhammad (peace be upon him) and his companions from Mecca. The surah was revealed to address the challenges and opportunities that the Muslim community faced in their new environment, such as:

- -

The surah was also revealed to highlight the contrast between the mercy and justice of Allah, and the ingratitude and rebellion of some of His creation, especially the jinn and mankind. The surah was also revealed to show the beauty and harmony of Allah's creation, and the signs and proofs of His oneness and lordship.

-

How to Download QS Ar Rahman in Different Formats and Languages

-

If you want to download QS Ar Rahman in different formats and languages, you have many options available online. You can choose from various websites and apps that offer Quran recitations, translations, tafsirs, and other resources. Here are some of the best sources that you can use:

-

Download qs ar rahman full mp3
-Download qs ar rahman 1-78 ayat
-Download qs ar rahman latin dan terjemahan
-Download qs ar rahman muzammil hasballah
-Download qs ar rahman mishary rashid alafasy
-Download qs ar rahman muhammad taha al junayd
-Download qs ar rahman maghfirah m hussein
-Download qs ar rahman hanan attaki
-Download qs ar rahman abdul basit abdus samad
-Download qs ar rahman saad al ghamdi
-Download qs ar rahman hani ar rifai
-Download qs ar rahman syekh sudais
-Download qs ar rahman nasser al qatami
-Download qs ar rahman ahmad saud
-Download qs ar rahman yusuf mansur
-Download qs ar rahman muhammad thaha dewasa
-Download qs ar rahman fatih seferagic
-Download qs ar rahman wafiq azizah
-Download qs ar rahman yusuf kalo
-Download qs ar rahman imam masjidil haram
-Download qs ar rahman muammar za
-Download qs ar rahman muhammad toha al junaid dewasa
-Download qs ar rahman salim bahanan
-Download qs ar rahman idris abkar
-Download qs ar rahman maher al muaiqly
-Download qs ar rahman ahmad al ajmi
-Download qs ar rahman abdurrahman as sudais
-Download qs ar rahman syaikh ali jaber
-Download qs ar rahman syekh ali hudaify
-Download qs ar rahman syekh shuraim
-Download qs ar rahman syekh mahmud khalil al husary
-Download qs ar rahman syekh abdullah awad al juhani
-Download qs ar rahman syekh abdullah basfar
-Download qs ar rahman syekh abdul aziz al ahmad
-Download qs ar rahman syekh abdul muhsin al qasim
-Download qs ar rahman syekh abdul wadud haneef
-Download qs ar rahman syekh abu bakr ash shatri
-Download qs ar rahman syekh adil al kalbani
-Download qs ar rahman syekh ahmad bin ali al ajmy
-Download qs ar rahman syekh akram al alaqimy
-Download qs ar rahman syekh ali abdurrahman al hudzaify
-Download qs ar rahman syekh bandar baleela
-Download qs ar rahman syekh fawaz al kaabi
-Download qs ar rahman syekh faysal noman
-Download qs ar rahman syekh ibrahim al akhdar
-Download qs ar rahman syekh ibrahim al jibreen
-Download qs ar rahman syekh ibrahim as sudaisi an nabawy

-

Download QS Ar Rahman in MP3 and Audio Formats

-

If you want to download QS Ar Rahman in MP3 and audio formats, you can use the following websites:

-

Quran.com: The Best Source for High Quality Quran Recitations

-

Quran.com is one of the most popular and reliable websites for Quran recitations. It offers high quality audio files by various reciters from different countries and styles. You can listen to or download Surah Ar Rahman by any reciter of your choice, such as Abdul Basit, Mishary Rashid, Saad Al Ghamdi, Maher Al Mueaqly, etc. You can also choose from different translations in English, Urdu, Indonesian, French, Spanish, etc. You can also read the Arabic text along with the audio, or view the word by word translation and transliteration. You can access Quran.com from any device, such as your computer, smartphone, or tablet.

-

To download Surah Ar Rahman from Quran.com, you can follow these steps:

-
    -
1. Go to [Quran.com] and search for Surah Ar Rahman in the search bar.
2. Select the reciter and translation of your choice from the drop-down menus.
3. Click on the play button to listen to the surah online, or click on the download button to save it on your device.
4. You can also click on the settings icon to adjust the speed, repeat mode, night mode, etc.
-

QuranicAudio.com: Stream or Download Quran Audio by Various Reciters

-

QuranicAudio.com is another great website for Quran audio. It offers a large collection of Quran recitations by various reciters from different countries and styles. You can stream or download Surah Ar Rahman by any reciter of your choice, such as Abdullah Basfar, Abdur Rahman As Sudais, Abu Bakr Al Shatri, Ahmed Al Ajmi, etc. You can also choose from different translations in English, Urdu, Indonesian, French, Spanish, etc. You can also read the Arabic text along with the audio.

-

To download Surah Ar Rahman from QuranicAudio.com, you can follow these steps:

-
    -
1. Go to [QuranicAudio.com] and search for Surah Ar Rahman in the search bar.
2. Select the reciter and translation of your choice from the drop-down menus.
3. Click on the play button to listen to the surah online, or right-click on the download button and select "Save link as" to save it on your device.
4. You can also click on the settings icon to adjust the speed, repeat mode, night mode, etc.
-

QuranCentral.com: Listen to Surah Ar Rahman by Different Qaris and Translations

-

QuranCentral.com is another excellent website for Quran audio. It offers a wide range of Quran recitations by different qaris (reciters) from different countries and styles. You can listen to or download Surah Ar Rahman by any qari of your choice, such as Abdul Rahman Al Sudais, Muhammad Siddiq Al Minshawi, Muhammad Jibreel, Nasser Al Qatami, etc. You can also choose from different translations in English, Urdu, Indonesian, French, Spanish, etc. You can also read the Arabic text along with the audio, or view the word by word translation and transliteration. You can access QuranCentral.com from any device, such as your computer, smartphone, or tablet.

-

To download Surah Ar Rahman from QuranCentral.com, you can follow these steps:

-
    -
1. Go to [QuranCentral.com] and search for Surah Ar Rahman in the search bar.
2. Select the qari and translation of your choice from the drop-down menus.
3. Click on the play button to listen to the surah online, or click on the download button to save it on your device.
4. You can also click on the settings icon to adjust the speed, repeat mode, night mode, etc.
-

Download QS Ar Rahman in PDF and Text Formats

-

If you want to download QS Ar Rahman in PDF and text formats, you can use the following websites:

-

LiteQuran.net: Read Surah Ar Rahman in Arabic, Latin, and Indonesian

-

LiteQuran.net is a simple and easy-to-use website for reading Quran online. It offers Surah Ar Rahman in Arabic, Latin (transliteration), and Indonesian (translation). You can also listen to the audio recitation by various reciters. You can also view the tajweed rules and color codes for each verse. You can access LiteQuran.net from any device, such as your computer, smartphone, or tablet.

-

To download Surah Ar Rahman from LiteQuran.net, you can follow these steps:

-
    -
1. Go to [LiteQuran.net] and search for Surah Ar Rahman in the search bar.
2. Select the language and reciter of your choice from the drop-down menus.
3. Click on the play button to listen to the surah online, or click on the PDF icon to download it on your device.
4. You can also click on the settings icon to adjust the font size, color theme, night mode, etc.
-

QuranBest.com: Read Surah Ar Rahman in Arabic and English with Tafsir

-

QuranBest.com is a comprehensive and interactive website for reading Quran online. It offers Surah Ar Rahman in Arabic and English (translation) with tafsir (explanation) by various scholars and sources. You can also listen to the audio recitation by various reciters. You can also view the word by word translation and transliteration for each verse. You can also access other features such as bookmarks, notes, highlights, etc. You can access QuranBest.com from any device, such as your computer, smartphone, or tablet.

-

To download Surah Ar Rahman from QuranBest.com, you can follow these steps:

-
    -
1. Go to [QuranBest.com] and search for Surah Ar Rahman in the search bar.
2. Select the language, reciter, and tafsir of your choice from the drop-down menus.
3. Click on the play button to listen to the surah online, or click on the PDF icon to download it on your device.
4. You can also click on the settings icon to adjust the font size, color theme, night mode, etc.
-

TafsirWeb.com: Read Surah Ar Rahman in Arabic and Indonesian with Tafsir

-

TafsirWeb.com is a dedicated website for reading Quran tafsir online. It offers Surah Ar Rahman in Arabic and Indonesian (translation) with tafsir (explanation) by various scholars and sources. You can also listen to the audio recitation by various reciters. You can also view the word by word translation and transliteration for each verse. You can also access other features such as bookmarks, notes, highlights, etc. You can access TafsirWeb.com from any device, such as your computer, smartphone, or tablet.

-

To download Surah Ar Rahman from TafsirWeb.com, you can follow these steps:

-
    -
1. Go to [TafsirWeb.com] and search for Surah Ar Rahman in the search bar.
2. Select the language, reciter, and tafsir of your choice from the drop-down menus.
3. Click on the play button to listen to the surah online, or click on the PDF icon to download it on your device.
4. You can also click on the settings icon to adjust the font size, color theme, night mode, etc.
-

How to Benefit from Listening to or Reading QS Ar Rahman

-

Listening to or reading QS Ar Rahman is not enough to benefit from its blessings and lessons. We also need to understand its meaning, reflect on its message, and apply its teachings in our daily life. Here are some tips on how to do that:

-

Tips for Reciting or Listening to QS Ar Rahman with Focus and Reflection

-

Reciting or listening to QS Ar Rahman with focus and reflection means paying attention to the words and their meanings, and thinking about their implications and relevance for us. Here are some tips on how to do that:

- -

Tips for Applying the Lessons of QS Ar Rahman in Daily Life

-

Applying the lessons of QS Ar Rahman in daily life means living according to its teachings and values, and implementing its wisdom and advice in our actions and interactions. Here are some tips on how to do that:

- -

Conclusion

-

QS Ar Rahman is a beautiful and powerful surah that reminds us of Allah's mercy and favors, and invites us to reflect on His signs and proofs. It also warns us of the consequences of denying or rejecting His favors, and encourages us to be grateful, humble, and obedient to Him. We can benefit from this surah by downloading it in different formats and languages, and by reciting or listening to it with focus and reflection. We can also apply its lessons in our daily life by living according to its teachings and values. We ask Allah to make us among those who recite, listen, understand, and act upon QS Ar Rahman. Ameen.

-

FAQs

-

Here are some frequently asked questions about QS Ar Rahman:

-
    -
1. What is the main theme of QS Ar Rahman?

    The main theme of QS Ar Rahman is the mercy and favors of Allah, and the response of His creation to them.

    -
2. How many times is the refrain "Then which of the favors of your Lord will you deny (O jinn and mankind)?" repeated in QS Ar Rahman?

    The refrain is repeated 31 times throughout the surah.

    -
3. What are some of the favors of Allah that are mentioned in QS Ar Rahman?

    Some of the favors of Allah that are mentioned in QS Ar Rahman are: the Quran, the creation of man and jinn, the sun and the moon, the stars and the trees, the sky and the earth, the seas and the rivers, the fruits and the grains, the pearls and the corals, the gardens and the springs, etc.

    -
4. What are some of the consequences of denying or rejecting Allah's favors that are mentioned in QS Ar Rahman?

    Some of the consequences of denying or rejecting Allah's favors that are mentioned in QS Ar Rahman are: the punishment of hellfire, the scorching wind and boiling water, the chains and iron collars, etc.

    -
5. What are some of the rewards for those who believe in Allah and do good deeds that are mentioned in QS Ar Rahman?

    Some of the rewards for those who believe in Allah and do good deeds that are mentioned in QS Ar Rahman are: the gardens of paradise, the companionship of pure spouses, the honor and dignity from Allah, etc.

    -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/2ndelement/voicevox/voicevox_engine/dev/synthesis_engine/__init__.py b/spaces/2ndelement/voicevox/voicevox_engine/dev/synthesis_engine/__init__.py deleted file mode 100644 index e7b2ac5b17594c2a9a137e23a72210209f2cbd4b..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/voicevox_engine/dev/synthesis_engine/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .mock import MockSynthesisEngine - -__all__ = ["MockSynthesisEngine"] diff --git a/spaces/4Taps/SadTalker/src/utils/preprocess.py b/spaces/4Taps/SadTalker/src/utils/preprocess.py deleted file mode 100644 index 4e3dad8d4a49080a3300f672965a11a8a2054fa2..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/utils/preprocess.py +++ /dev/null @@ -1,152 +0,0 @@ -import numpy as np -import cv2, os, sys, torch -from tqdm import tqdm -from PIL import Image - -# 3dmm extraction -from src.face3d.util.preprocess import align_img -from src.face3d.util.load_mats import load_lm3d -from src.face3d.models import networks -from src.face3d.extract_kp_videos import KeypointExtractor - -from scipy.io import loadmat, savemat -from src.utils.croper import Croper - -import warnings -warnings.filterwarnings("ignore") - -def split_coeff(coeffs): - """ - Return: - coeffs_dict -- a dict of torch.tensors - - Parameters: - coeffs -- torch.tensor, size (B, 256) - """ - id_coeffs = coeffs[:, :80] - exp_coeffs = coeffs[:, 80: 144] - tex_coeffs = coeffs[:, 144: 224] - angles = coeffs[:, 224: 227] - gammas = coeffs[:, 227: 254] - translations = coeffs[:, 254:] - return { - 'id': id_coeffs, - 'exp': exp_coeffs, - 'tex': tex_coeffs, - 'angle': angles, - 'gamma': gammas, - 'trans': translations - } - - -class CropAndExtract(): - def __init__(self, path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device): - - self.croper = Croper(path_of_lm_croper) - self.kp_extractor = KeypointExtractor(device) - self.net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='').to(device) - checkpoint = torch.load(path_of_net_recon_model, map_location=torch.device(device)) - self.net_recon.load_state_dict(checkpoint['net_recon']) - self.net_recon.eval() - self.lm3d_std = load_lm3d(dir_of_BFM_fitting) - self.device = device - - def generate(self, input_path, save_dir, crop_or_resize='crop'): - - pic_size = 256 - pic_name = os.path.splitext(os.path.split(input_path)[-1])[0] - - landmarks_path = os.path.join(save_dir, pic_name+'_landmarks.txt') - coeff_path = os.path.join(save_dir, pic_name+'.mat') - png_path = os.path.join(save_dir, pic_name+'.png') - - #load input - if not os.path.isfile(input_path): - raise ValueError('input_path must be a valid path to video/image file') - elif input_path.split('.')[1] in ['jpg', 'png', 'jpeg']: - # loader for first frame - full_frames = [cv2.imread(input_path)] - fps = 25 - else: - # loader for videos - video_stream = cv2.VideoCapture(input_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - full_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - full_frames.append(frame) - break - x_full_frames = [cv2.cvtColor(full_frames[0], cv2.COLOR_BGR2RGB) ] - - if crop_or_resize.lower() == 'crop': # default crop - x_full_frames, crop, quad = self.croper.crop(x_full_frames, xsize=pic_size) - clx, cly, crx, cry = crop - lx, ly, rx, ry = quad - lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry) - oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - 
original_size = (ox2 - ox1, oy2 - oy1) - else: - oy1, oy2, ox1, ox2 = 0, x_full_frames[0].shape[0], 0, x_full_frames[0].shape[1] - original_size = (ox2 - ox1, oy2 - oy1) - - frames_pil = [Image.fromarray(cv2.resize(frame,(pic_size, pic_size))) for frame in x_full_frames] - if len(frames_pil) == 0: - print('No face is detected in the input file') - return None, None - - # save crop info - for frame in frames_pil: - cv2.imwrite(png_path, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)) - - # 2. get the landmark according to the detected face. - if not os.path.isfile(landmarks_path): - lm = self.kp_extractor.extract_keypoint(frames_pil, landmarks_path) - else: - print(' Using saved landmarks.') - lm = np.loadtxt(landmarks_path).astype(np.float32) - lm = lm.reshape([len(x_full_frames), -1, 2]) - - if not os.path.isfile(coeff_path): - # load 3dmm paramter generator from Deep3DFaceRecon_pytorch - video_coeffs, full_coeffs = [], [] - for idx in tqdm(range(len(frames_pil)), desc='3DMM Extraction In Video:'): - frame = frames_pil[idx] - W,H = frame.size - lm1 = lm[idx].reshape([-1, 2]) - - if np.mean(lm1) == -1: - lm1 = (self.lm3d_std[:, :2]+1)/2. - lm1 = np.concatenate( - [lm1[:, :1]*W, lm1[:, 1:2]*H], 1 - ) - else: - lm1[:, -1] = H - 1 - lm1[:, -1] - - trans_params, im1, lm1, _ = align_img(frame, lm1, self.lm3d_std) - - trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]).astype(np.float32) - im_t = torch.tensor(np.array(im1)/255., dtype=torch.float32).permute(2, 0, 1).to(self.device).unsqueeze(0) - - with torch.no_grad(): - full_coeff = self.net_recon(im_t) - coeffs = split_coeff(full_coeff) - - pred_coeff = {key:coeffs[key].cpu().numpy() for key in coeffs} - - pred_coeff = np.concatenate([ - pred_coeff['exp'], - pred_coeff['angle'], - pred_coeff['trans'], - trans_params[2:][None], - ], 1) - video_coeffs.append(pred_coeff) - full_coeffs.append(full_coeff.cpu().numpy()) - - semantic_npy = np.array(video_coeffs)[:,0] - - savemat(coeff_path, {'coeff_3dmm': semantic_npy, 'full_3dmm': np.array(full_coeffs)[0]}) - - return coeff_path, png_path, original_size \ No newline at end of file diff --git a/spaces/801artistry/RVC801/rvc_for_realtime.py b/spaces/801artistry/RVC801/rvc_for_realtime.py deleted file mode 100644 index 55070f668c385ba0a9ba50989b282448cd75e59b..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/rvc_for_realtime.py +++ /dev/null @@ -1,297 +0,0 @@ -import faiss, torch, traceback, parselmouth, numpy as np, torchcrepe, torch.nn as nn, pyworld -from fairseq import checkpoint_utils -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -import os, sys -from time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal - -now_dir = os.getcwd() -sys.path.append(now_dir) -from configs.config import Config -from multiprocessing import Manager as M - -mm = M() -config = Config() - - -class RVC: - def __init__( - self, key, pth_path, index_path, index_rate, n_cpu, inp_q, opt_q, device - ) -> None: - """ - 初始化 - """ - try: - global config - self.inp_q = inp_q - self.opt_q = opt_q - self.device = device - self.f0_up_key = key - self.time_step = 160 / 16000 * 1000 - self.f0_min = 50 - self.f0_max = 1100 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - self.sr = 16000 - self.window = 160 - self.n_cpu = n_cpu - if index_rate != 0: - self.index = 
faiss.read_index(index_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - print("index search enabled") - self.index_rate = index_rate - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - self.model = hubert_model - cpt = torch.load(pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - print(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - if config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - self.is_half = config.is_half - except: - print(traceback.format_exc()) - - def get_f0_post(self, f0): - f0_min = self.f0_min - f0_max = self.f0_max - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int_) - return f0_coarse, f0bak - - def get_f0(self, x, f0_up_key, n_cpu, method="harvest"): - n_cpu = int(n_cpu) - if method == "crepe": - return self.get_f0_crepe(x, f0_up_key) - if method == "rmvpe": - return self.get_f0_rmvpe(x, f0_up_key) - if method == "pm": - p_len = x.shape[0] // 160 - f0 = ( - parselmouth.Sound(x, 16000) - .to_pitch_ac( - time_step=0.01, - voicing_threshold=0.6, - pitch_floor=50, - pitch_ceiling=1100, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - print(pad_size, p_len - len(f0) - pad_size) - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - if n_cpu == 1: - f0, t = pyworld.harvest( - x.astype(np.double), - fs=16000, - f0_ceil=1100, - f0_floor=50, - frame_period=10, - ) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - f0bak = np.zeros(x.shape[0] // 160, dtype=np.float64) - length = len(x) - part_length = int(length / n_cpu / 160) * 160 - ts = ttime() - res_f0 = mm.dict() - for idx in range(n_cpu): - tail = part_length * (idx + 1) + 320 - if idx == 0: - self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts)) - else: - self.inp_q.put( - (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts) - ) - while 1: - res_ts = self.opt_q.get() - if res_ts == ts: - break - f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])] - for idx, f0 in enumerate(f0s): - if idx == 0: - f0 = f0[:-3] - elif idx != n_cpu - 1: - f0 = f0[2:-3] - else: - f0 = f0[2:-1] - f0bak[ - part_length * idx // 160 : part_length * idx // 160 + f0.shape[0] - ] = f0 - f0bak 
= signal.medfilt(f0bak, 3) - f0bak *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0bak) - - def get_f0_crepe(self, x, f0_up_key): - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - 160, - self.f0_min, - self.f0_max, - "full", - batch_size=512, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - - def get_f0_rmvpe(self, x, f0_up_key): - if hasattr(self, "model_rmvpe") == False: - from infer.lib.rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - - def infer( - self, - feats: torch.Tensor, - indata: np.ndarray, - rate1, - rate2, - cache_pitch, - cache_pitchf, - f0method, - ) -> np.ndarray: - feats = feats.view(1, -1) - if config.is_half: - feats = feats.half() - else: - feats = feats.float() - feats = feats.to(self.device) - t1 = ttime() - with torch.no_grad(): - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - inputs = { - "source": feats, - "padding_mask": padding_mask, - "output_layer": 9 if self.version == "v1" else 12, - } - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - t2 = ttime() - try: - if hasattr(self, "index") and self.index_rate != 0: - leng_replace_head = int(rate1 * feats[0].shape[0]) - npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if config.is_half: - npy = npy.astype("float16") - feats[0][-leng_replace_head:] = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate - + (1 - self.index_rate) * feats[0][-leng_replace_head:] - ) - else: - print("index search FAIL or disabled") - except: - traceback.print_exc() - print("index search FAIL") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t3 = ttime() - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method) - cache_pitch[:] = np.append(cache_pitch[pitch[:-1].shape[0] :], pitch[:-1]) - cache_pitchf[:] = np.append( - cache_pitchf[pitchf[:-1].shape[0] :], pitchf[:-1] - ) - p_len = min(feats.shape[1], 13000, cache_pitch.shape[0]) - else: - cache_pitch, cache_pitchf = None, None - p_len = min(feats.shape[1], 13000) - t4 = ttime() - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - cache_pitch = cache_pitch[:p_len] - cache_pitchf = cache_pitchf[:p_len] - cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device) - cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device) - p_len = torch.LongTensor([p_len]).to(self.device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(self.device) - with torch.no_grad(): - if self.if_f0 == 1: - infered_audio = ( - self.net_g.infer( - feats, p_len, cache_pitch, cache_pitchf, sid, rate2 - )[0][0, 0] - .data.cpu() - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid, rate2)[0][0, 0] - 
.data.cpu() - .float() - ) - t5 = ttime() - print("time->fea-index-f0-model:", t2 - t1, t3 - t2, t4 - t3, t5 - t4) - return infered_audio diff --git a/spaces/AIFILMS/StyleGANEX/app.py b/spaces/AIFILMS/StyleGANEX/app.py deleted file mode 100644 index 022debd0728451b2b850833b5e6640ade823e428..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/StyleGANEX/app.py +++ /dev/null @@ -1,124 +0,0 @@ -from __future__ import annotations - -import argparse -import pathlib -import torch -import gradio as gr - -import os - -from webUI.app_task import * -from webUI.styleganex_model import Model - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--theme', type=str) - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - return parser.parse_args() - -is_shared_ui = True if "AIFILMS/StyleGANEX" in os.environ['SPACE_ID'] else False - -DESCRIPTION = ''' -
- [HTML banner: "Face Manipulation with StyleGANEX" heading; note "For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings." with a Duplicate Space badge; example image]

-''' -ARTICLE = r""" -If StyleGANEX is helpful, please help to ⭐ the Github Repo. Thanks! -[![GitHub Stars](https://img.shields.io/github/stars/williamyang1991/StyleGANEX?style=social)](https://github.com/williamyang1991/StyleGANEX) ---- -📝 **Citation** -If our work is useful for your research, please consider citing: -```bibtex -@article{yang2023styleganex, - title = {StyleGANEX: StyleGAN-Based Manipulation Beyond Cropped Aligned Faces}, - author = {Yang, Shuai and Jiang, Liming and Liu, Ziwei and and Loy, Chen Change}, - journal = {arXiv preprint arXiv:2303.06146}, - year={2023}, -} -``` -📋 **License** -This project is licensed under S-Lab License 1.0. -Redistribution and use for non-commercial purposes should follow this license. - -📧 **Contact** -If you have any questions, please feel free to reach me out at williamyang@pku.edu.cn. -""" - -FOOTER = '
visitor badge
' - -def main(): - args = parse_args() - args.device = 'cuda' if torch.cuda.is_available() else 'cpu' - print('*** Now using %s.'%(args.device)) - model = Model(device=args.device) - - - torch.hub.download_url_to_file('https://raw.githubusercontent.com/williamyang1991/StyleGANEX/main/data/234_sketch.jpg', - '234_sketch.jpg') - torch.hub.download_url_to_file('https://github.com/williamyang1991/StyleGANEX/raw/main/output/ILip77SbmOE_inversion.pt', - 'ILip77SbmOE_inversion.pt') - torch.hub.download_url_to_file('https://raw.githubusercontent.com/williamyang1991/StyleGANEX/main/data/ILip77SbmOE.png', - 'ILip77SbmOE.png') - torch.hub.download_url_to_file('https://raw.githubusercontent.com/williamyang1991/StyleGANEX/main/data/ILip77SbmOE_mask.png', - 'ILip77SbmOE_mask.png') - torch.hub.download_url_to_file('https://raw.githubusercontent.com/williamyang1991/StyleGANEX/main/data/pexels-daniel-xavier-1239291.jpg', - 'pexels-daniel-xavier-1239291.jpg') - torch.hub.download_url_to_file('https://github.com/williamyang1991/StyleGANEX/raw/main/data/529_2.mp4', - '529_2.mp4') - torch.hub.download_url_to_file('https://github.com/williamyang1991/StyleGANEX/raw/main/data/684.mp4', - '684.mp4') - torch.hub.download_url_to_file('https://github.com/williamyang1991/StyleGANEX/raw/main/data/pexels-anthony-shkraba-production-8136210.mp4', - 'pexels-anthony-shkraba-production-8136210.mp4') - - - with gr.Blocks(css='style.css') as demo: - if(is_shared_ui): - with gr.Box(): - top_description = gr.HTML(f''' -
- [HTML notice: "Attention - This Space doesn't work in this shared UI. For it to work, you can access the original or duplicate this Space and run it on your own profile using a GPU." with a Duplicate Space badge]
- ''') - gr.Markdown(DESCRIPTION) - with gr.Tabs(): - with gr.TabItem('Inversion for Editing'): - create_demo_inversion(model.process_inversion, allow_optimization=False) - with gr.TabItem('Image Face Toonify'): - create_demo_toonify(model.process_toonify) - with gr.TabItem('Video Face Toonify'): - create_demo_vtoonify(model.process_vtoonify, max_frame_num=12) - with gr.TabItem('Image Face Editing'): - create_demo_editing(model.process_editing) - with gr.TabItem('Video Face Editing'): - create_demo_vediting(model.process_vediting, max_frame_num=12) - with gr.TabItem('Sketch2Face'): - create_demo_s2f(model.process_s2f) - with gr.TabItem('Mask2Face'): - create_demo_m2f(model.process_m2f) - with gr.TabItem('SR'): - create_demo_sr(model.process_sr) - gr.Markdown(ARTICLE) - gr.Markdown(FOOTER) - - demo.launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - -if __name__ == '__main__': - main() - diff --git a/spaces/AIFILMS/StyleGANEX/configs/__init__.py b/spaces/AIFILMS/StyleGANEX/configs/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AIKey/ai_date/style.css b/spaces/AIKey/ai_date/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/AIKey/ai_date/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/AIZero2HeroBootcamp/AnimatedGifGallery/app.py b/spaces/AIZero2HeroBootcamp/AnimatedGifGallery/app.py deleted file mode 100644 index ab7cb9583171b765412463f9c8d16b14f2a25d59..0000000000000000000000000000000000000000 --- a/spaces/AIZero2HeroBootcamp/AnimatedGifGallery/app.py +++ /dev/null @@ -1,52 +0,0 @@ -import streamlit as st -import os -import random - -def get_gifs(directory): - return [f for f in os.listdir(directory) if f.endswith('.gif')] - -def showAnimatedGif(gif): - import streamlit as st - import base64 - #st.markdown("![Alt Text](https://media.giphy.com/media/vFKqnCdLPNOKc/giphy.gif)") - st.write('Loading: ' + gif) - file_ = open(gif, "rb") - contents = file_.read() - data_url = base64.b64encode(contents).decode("utf-8") - file_.close() - st.write(data_url) - - st.markdown( - f'gif', - unsafe_allow_html=True, - ) - -def main(): - st.title('Animated GIFs in Streamlit') - - directory = './gifs' # Replace with your directory of GIFs - gif_files = get_gifs(directory) - - num_rows = len(gif_files) // 3 - if len(gif_files) % 3: - num_rows += 1 - - cols = [st.columns(3) for _ in range(num_rows)] - - for i in range(num_rows): - for j in range(3): - idx = i*3 + j - if idx < len(gif_files): - #showAnimatedGif(os.path.join(directory, gif_files[idx])) - cols[i][j].image(os.path.join(directory, gif_files[idx]), width=200) - - if st.button('Randomize'): - random.shuffle(gif_files) - for i in range(num_rows): - for j in range(3): - idx = i*3 + j - if idx < len(gif_files): - cols[i][j].image(os.path.join(directory, gif_files[idx]), width=200) - -if __name__ == "__main__": - main() diff --git a/spaces/ARTeLab/ARTeLab-SummIT/README.md b/spaces/ARTeLab/ARTeLab-SummIT/README.md 
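For reference, the base64 data-URI approach that `showAnimatedGif` sketches in the GIF gallery app above can be written as a small self-contained helper. This is only an illustrative sketch of that idea, not code from the Space; the helper name and the file path are assumptions.

```python
import base64
import streamlit as st

def show_animated_gif(path: str) -> None:
    # Read the GIF and base64-encode it so it can be inlined in the page
    # as a data URI instead of being served as a separate static file.
    with open(path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode("utf-8")
    # st.markdown escapes raw HTML unless unsafe_allow_html=True, so the
    # <img> tag is rendered (and the GIF keeps animating) rather than shown as text.
    st.markdown(
        f'<img src="data:image/gif;base64,{b64}" alt="animated gif" width="200">',
        unsafe_allow_html=True,
    )

show_animated_gif("gifs/example.gif")  # illustrative path
```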
deleted file mode 100644 index bb93ae203d08bbe8ce08d83803038a4933b63148..0000000000000000000000000000000000000000 --- a/spaces/ARTeLab/ARTeLab-SummIT/README.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: ARTeLab SummIT -emoji: 📰 -colorFrom: indigo -colorTo: green -sdk: streamlit -app_file: app.py -pinned: false ---- -# Configuration -`title`: _string_ -Display title for the Space -`emoji`: _string_ -Space emoji (emoji-only character allowed) -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) -`sdk`: _string_ -Can be either `gradio` or `streamlit` -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. \ No newline at end of file diff --git a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/transformer.py b/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/transformer.py deleted file mode 100644 index e69cca829d774d0b8b36c0de9b7924373da81b43..0000000000000000000000000000000000000000 --- a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/transformer.py +++ /dev/null @@ -1,747 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Transformer model, with streaming support, xformer attention support -and easy causal attention with a potentially finite receptive field. - -See `StreamingTransformer` for more information. - -Unlike regular PyTorch Transformer, we make the hard choice that batches are first. -""" - -import typing as tp - -from einops import rearrange -import torch -import torch.nn as nn -from torch.nn import functional as F -from torch.utils.checkpoint import checkpoint as torch_checkpoint -from xformers import ops - -from .rope import RotaryEmbedding -from .streaming import StreamingModule - -_efficient_attention_backend: str = 'torch' - - -def set_efficient_attention_backend(backend: str = 'torch'): - # Using torch by default, it seems a bit faster on older P100 GPUs (~20% faster). - global _efficient_attention_backend - assert _efficient_attention_backend in ['xformers', 'torch'] - _efficient_attention_backend = backend - - -def _get_attention_time_dimension() -> int: - if _efficient_attention_backend == 'torch': - return 2 - else: - return 1 - - -def _is_profiled() -> bool: - # Return true if we are currently running with a xformers profiler activated. - try: - from xformers.profiler import profiler - except ImportError: - return False - return profiler._Profiler._CURRENT_PROFILER is not None - - -def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module: - """Create normalization module for transformer encoder layer. - - Args: - norm_type (str): Normalization method. - dim (int): Dimension of the normalized layer. - **kwargs (dict): Additional parameters for normalization layer. - Returns: - nn.Module: Normalization module. 
- """ - if norm_type == 'layer_norm': - return nn.LayerNorm(dim, eps=1e-5, **kwargs) - else: - raise ValueError(f"Unknown norm type: {norm_type}") - - -def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000, - dtype: torch.dtype = torch.float32) -> torch.Tensor: - """Create sinusoidal positional embedding, with shape `[B, T, C]`. - - Args: - positions (torch.Tensor): LongTensor of positions. - dim (int): Dimension of the embedding. - max_period (float): Maximum period of the cosine/sine functions. - dtype (torch.dtype or str): dtype to use to generate the embedding. - Returns: - torch.Tensor: Sinusoidal positional embedding. - """ - # We aim for BTC format - assert dim % 2 == 0 - half_dim = dim // 2 - positions = positions.to(dtype) - adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1) - max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype) # avoid sync point - phase = positions / (max_period_tensor ** (adim / (half_dim - 1))) - return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1) - - -def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: - """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers""" - if n_rep == 1: - return x - if _efficient_attention_backend == 'torch': - bs, n_kv_heads, slen, head_dim = x.shape - return ( - x[:, :, None, :, :] - .expand(bs, n_kv_heads, n_rep, slen, head_dim) - .reshape(bs, n_kv_heads * n_rep, slen, head_dim) - ) - else: - bs, slen, n_kv_heads, head_dim = x.shape - return ( - x[:, :, :, None, :] - .expand(bs, slen, n_kv_heads, n_rep, head_dim) - .reshape(bs, slen, n_kv_heads * n_rep, head_dim) - ) - - -class LayerScale(nn.Module): - """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). - This rescales diagonaly the residual outputs close to 0, with a learnt scale. - - Args: - channels (int): Number of channels. - init (float): Initial scale. - channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`. - device (torch.device or None): Device on which to initialize the module. - dtype (torch.dtype or None): dtype to use to initialize the module. - """ - def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True, - device=None, dtype=None): - super().__init__() - self.channel_last = channel_last - self.scale = nn.Parameter( - torch.full((channels,), init, - requires_grad=True, device=device, dtype=dtype)) - - def forward(self, x: torch.Tensor): - if self.channel_last: - return self.scale * x - else: - return self.scale[:, None] * x - - -class StreamingMultiheadAttention(StreamingModule): - """Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation. - - Args: - embed_dim (int): Dimension to project to. - num_heads (int): Number of heads. - dropout (float): Dropout level. - bias (bool): Use bias in projections. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - rope (`RotaryEmbedding` or None): Rope embedding to use. - cross_attention: Should be true when used as a cross attention. 
- All keys and values must be available at once, streaming is only for the queries. - Cannot be used with `causal` or `rope` (as it wouldn't make sens to - intepret the time steps in the keys relative to those in the queries). - safe_streaming (bool): Bug fix, will go away with xformers update. - qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product. - kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads). - This will lead to faster decoding time on A100 or other GPUs with tensorcore. - device (torch.device or None): Sevice on which to initialize. - dtype (torch.dtype or None): dtype to use. - """ - def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, - causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False, - memory_efficient: bool = False, attention_as_float32: bool = False, - rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False, - safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1, - device=None, dtype=None): - super().__init__() - factory_kwargs = {'device': device, 'dtype': dtype} - if past_context is not None: - assert causal - - self.embed_dim = embed_dim - self.causal = causal - self.past_context = past_context - self.memory_efficient = memory_efficient - self.attention_as_float32 = attention_as_float32 - self.rope = rope - self.cross_attention = cross_attention - self.safe_streaming = safe_streaming - self.num_heads = num_heads - self.dropout = dropout - self.kv_repeat = kv_repeat - if cross_attention: - assert not causal, "Causal cannot work with cross attention." - assert rope is None, "Rope cannot work with cross attention." - - if memory_efficient: - _verify_xformers_memory_efficient_compat() - - self.custom = _is_custom(custom, memory_efficient) - if self.custom: - out_dim = embed_dim - assert num_heads % kv_repeat == 0 - assert not cross_attention or kv_repeat == 1 - num_kv = num_heads // kv_repeat - kv_dim = (embed_dim // num_heads) * num_kv - out_dim += 2 * kv_dim - in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs) - # We try to follow the default PyTorch MHA convention, to easily compare results. - self.in_proj_weight = in_proj.weight - self.in_proj_bias = in_proj.bias - if bias: - self.in_proj_bias.data.zero_() # Following Pytorch convention - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs) - if bias: - self.out_proj.bias.data.zero_() - else: - assert not qk_layer_norm - assert kv_repeat == 1 - self.mha = nn.MultiheadAttention( - embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True, - **factory_kwargs) - self.qk_layer_norm = qk_layer_norm - if qk_layer_norm: - assert self.custom - assert kv_repeat == 1 - ln_dim = embed_dim - self.q_layer_norm = nn.LayerNorm(ln_dim) - self.k_layer_norm = nn.LayerNorm(ln_dim) - - def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs): - if not self.custom: - # Support compat with regular MHA - keys = [n for n, _ in self.mha.named_parameters()] - for key in keys: - if prefix + key in state_dict: - state_dict[prefix + "mha." 
+ key] = state_dict.pop(prefix + key) - super()._load_from_state_dict(state_dict, prefix, *args, **kwargs) - - def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype): - # Return a causal mask, accounting for potentially stored past keys/values - # We actually return a bias for the attention score, as this has the same - # convention both in the builtin MHA in Pytorch, and Xformers functions. - time_dim = _get_attention_time_dimension() - if self.memory_efficient: - from xformers.ops import LowerTriangularMask - if current_steps == 1: - # If we only have one step, then we do not need a mask. - return None - elif 'past_keys' in self._streaming_state: - raise RuntimeError('Not supported at the moment') - else: - # Then we can safely use a lower triangular mask - return LowerTriangularMask() - if self._streaming_state: - past_keys = self._streaming_state['past_keys'] - past_steps = past_keys.shape[time_dim] - else: - past_steps = 0 - - queries_pos = torch.arange( - past_steps, current_steps + past_steps, device=device).view(-1, 1) - keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1) - delta = queries_pos - keys_pos - valid = delta >= 0 - if self.past_context is not None: - valid &= (delta <= self.past_context) - return torch.where( - valid, - torch.zeros([], device=device, dtype=dtype), - torch.full([], float('-inf'), device=device, dtype=dtype)) - - def _complete_kv(self, k, v): - time_dim = _get_attention_time_dimension() - if self.cross_attention: - # With cross attention we assume all keys and values - # are already available, and streaming is with respect - # to the queries only. - return k, v - # Complete the key/value pair using the streaming state. - if self._streaming_state: - pk = self._streaming_state['past_keys'] - nk = torch.cat([pk, k], dim=time_dim) - if v is k: - nv = nk - else: - pv = self._streaming_state['past_values'] - nv = torch.cat([pv, v], dim=time_dim) - else: - nk = k - nv = v - - assert nk.shape[time_dim] == nv.shape[time_dim] - offset = 0 - if self.past_context is not None: - offset = max(0, nk.shape[time_dim] - self.past_context) - if self._is_streaming: - self._streaming_state['past_keys'] = nk[:, offset:] - if v is not k: - self._streaming_state['past_values'] = nv[:, offset:] - if 'offset' in self._streaming_state: - self._streaming_state['offset'] += offset - else: - self._streaming_state['offset'] = torch.tensor(0) - return nk, nv - - def _apply_rope(self, query: torch.Tensor, key: torch.Tensor): - # TODO: fix and verify layout. - assert _efficient_attention_backend == 'xformers', 'Rope not supported with torch attn.' - # Apply rope embeddings to query and key tensors. 
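# When streaming, the rotary phases must continue from the absolute position of
# the newest frames: 'past_keys' counts frames still held in the cache and
# 'offset' counts frames already trimmed from it, so their sum is passed to
# rotate_qk() below as the starting position.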
- assert self.rope is not None - if 'past_keys' in self._streaming_state: - past_keys_offset = self._streaming_state['past_keys'].shape[1] - else: - past_keys_offset = 0 - if 'offset' in self._streaming_state: - past_context_offset = int(self._streaming_state['offset'].item()) - else: - past_context_offset = 0 - streaming_offset = past_context_offset + past_keys_offset - return self.rope.rotate_qk(query, key, start=streaming_offset) - - def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, - key_padding_mask=None, need_weights=False, attn_mask=None, - average_attn_weights=True, is_causal=False): - assert attn_mask is None - assert not is_causal, ("new param added in torch 2.0.1 not supported, " - "use the causal args in the constructor.") - - time_dim = _get_attention_time_dimension() - if time_dim == 2: - layout = "b h t d" - else: - layout = "b t h d" - dtype = query.dtype - if self._is_streaming: - assert self.causal or self.cross_attention, \ - "Streaming only available for causal or cross attention" - - if self.causal: - # At the moment we specialize only for the self-attention case. - assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value" - assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value" - attn_mask = self._get_mask(query.shape[1], query.device, query.dtype) - - if self.custom: - # custom implementation - assert need_weights is False - assert key_padding_mask is None - if self.cross_attention: - # Different queries, keys, values, we have to spit manually the weights - # before applying the linear. - dim = self.in_proj_weight.shape[0] // 3 - if self.in_proj_bias is None: - bias_q, bias_k, bias_v = None, None, None - else: - bias_q = self.in_proj_bias[:dim] - bias_k = self.in_proj_bias[dim: 2 * dim] - bias_v = self.in_proj_bias[2 * dim:] - q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q) - # todo: when streaming, we could actually save k, v and check the shape actually match. - k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k) - v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v) - if self.qk_layer_norm is True: - q = self.q_layer_norm(q) - k = self.k_layer_norm(k) - q, k, v = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k, v]] - else: - if not _is_profiled(): - # profiling breaks that propertysomehow. 
- assert query is key, "specialized implementation" - assert value is key, "specialized implementation" - projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias) - if self.kv_repeat == 1: - if time_dim == 2: - bound_layout = "b h p t d" - else: - bound_layout = "b t p h d" - packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads) - q, k, v = ops.unbind(packed, dim=2) - else: - embed_dim = self.embed_dim - per_head_dim = (embed_dim // self.num_heads) - kv_heads = self.num_heads // self.kv_repeat - q = projected[:, :, :embed_dim] - start = embed_dim - end = start + per_head_dim * kv_heads - k = projected[:, :, start: end] - v = projected[:, :, end:] - q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads) - k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads) - v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads) - - if self.qk_layer_norm is True: - assert self.kv_repeat == 1 - q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]] - q = self.q_layer_norm(q) - k = self.k_layer_norm(k) - q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]] - if self.rope: - q, k = self._apply_rope(q, k) - k, v = self._complete_kv(k, v) - if self.kv_repeat > 1: - k = expand_repeated_kv(k, self.kv_repeat) - v = expand_repeated_kv(v, self.kv_repeat) - if self.attention_as_float32: - q, k, v = [x.float() for x in [q, k, v]] - if self.memory_efficient: - p = self.dropout if self.training else 0 - if _efficient_attention_backend == 'torch': - x = torch.nn.functional.scaled_dot_product_attention( - q, k, v, is_causal=attn_mask is not None, dropout_p=p) - else: - x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p) - else: - # We include the dot product as float32, for consistency - # with the other implementations that include that step - # as part of the attention. Note that when using `autocast`, - # the einsums would be done as bfloat16, but the softmax - # would be done as bfloat16, so `attention_as_float32` will - # extend a bit the range of operations done in float32, - # although this should make no difference. - q = q / q.shape[-1] ** 0.5 - key_layout = layout.replace('t', 'k') - query_layout = layout - if self._is_streaming and self.safe_streaming and q.device.type == 'cuda': - with torch.autocast(device_type=q.device.type, dtype=torch.float32): - pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k) - else: - pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k) - if attn_mask is not None: - pre_w = pre_w + attn_mask - w = torch.softmax(pre_w, dim=-1) - w = F.dropout(w, self.dropout, training=self.training).to(v) - # Key and value have the same format. - x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v) - x = x.to(dtype) - x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads) - x = self.out_proj(x) - else: - key, value = self._complete_kv(key, value) - if self.attention_as_float32: - query, key, value = [x.float() for x in [query, key, value]] - x, _ = self.mha( - query, key, value, key_padding_mask, - need_weights, attn_mask, average_attn_weights) - x = x.to(dtype) - - return x, None - - -class StreamingTransformerLayer(nn.TransformerEncoderLayer): - """TransformerLayer with Streaming / Causal support. - This also integrates cross_attention, when passing `cross_attention=True`, - rather than having two separate classes like in PyTorch. - - Args: - d_model (int): Dimension of the data. - num_heads (int): Number of heads. 
- dim_feedforward (int): Intermediate dimension of FF module. - dropout (float): Dropout both for MHA and FF. - bias_ff (bool): Use bias for FF. - bias_attn (bool): Use bias for MHA. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention. - qk_layer_norm_cross (bool): Same for the cross attention. - cross_attention (bool): If True, expect to get secondary input for cross-attention. - Cross attention will use the default MHA, as it typically won't require - special treatment. - layer_scale (float or None): If not None, LayerScale will be used with - the given value as initial scale. - rope (`RotaryEmbedding` or None): Rope embedding to use. - attention_dropout (float or None): If not None, separate the value of the dimension dropout - in FFN and of the attention dropout. - kv_repeat (int): If > 1, will repeat keys and queries multiple times (need to divide num_heads). - This will lead to faster decoding time on A100 or other GPUs with tensorcore. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - **kwargs: See `nn.TransformerEncoderLayer`. - """ - def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1, - bias_ff: bool = True, bias_attn: bool = True, causal: bool = False, - past_context: tp.Optional[int] = None, custom: bool = False, - memory_efficient: bool = False, attention_as_float32: bool = False, - qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False, - cross_attention: bool = False, layer_scale: tp.Optional[float] = None, - rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None, - kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs): - super().__init__(d_model, num_heads, dim_feedforward, dropout, - device=device, dtype=dtype, batch_first=True, **kwargs) - factory_kwargs = {'device': device, 'dtype': dtype} - # Redefine self_attn to our streaming multi-head attention - attn_kwargs: tp.Dict[str, tp.Any] = { - 'embed_dim': d_model, - 'num_heads': num_heads, - 'dropout': dropout if attention_dropout is None else attention_dropout, - 'bias': bias_attn, - 'custom': custom, - 'memory_efficient': memory_efficient, - 'attention_as_float32': attention_as_float32, - } - self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention( - causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm, - kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs) # type: ignore - # Redefine feedforward layers to expose bias parameter - self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs) - self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs) - - self.layer_scale_1: nn.Module - self.layer_scale_2: nn.Module - if layer_scale is None: - self.layer_scale_1 = nn.Identity() - self.layer_scale_2 = nn.Identity() - else: - self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs) - self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs) - - 
self.cross_attention: tp.Optional[nn.Module] = None - if cross_attention: - self.cross_attention = StreamingMultiheadAttention( - cross_attention=True, qk_layer_norm=qk_layer_norm_cross, - **attn_kwargs, **factory_kwargs) - # Norm and dropout - self.dropout_cross = nn.Dropout(dropout) - # eps value matching that used in PyTorch reference implementation. - self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs) - self.layer_scale_cross: nn.Module - if layer_scale is None: - self.layer_scale_cross = nn.Identity() - else: - self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs) - self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore - self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs) # type: ignore - - def _cross_attention_block(self, src: torch.Tensor, - cross_attention_src: torch.Tensor) -> torch.Tensor: - assert self.cross_attention is not None - # queries are from src, keys and values from cross_attention_src. - x = self.cross_attention( - src, cross_attention_src, cross_attention_src, need_weights=False)[0] - return self.dropout_cross(x) # type: ignore - - def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None, # type: ignore - src_key_padding_mask: tp.Optional[torch.Tensor] = None, - cross_attention_src: tp.Optional[torch.Tensor] = None): - if self.cross_attention is None: - assert cross_attention_src is None - else: - assert cross_attention_src is not None - x = src - if self.norm_first: - x = x + self.layer_scale_1( - self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)) - if cross_attention_src is not None: - x = x + self.layer_scale_cross( - self._cross_attention_block( - self.norm_cross(x), cross_attention_src)) - x = x + self.layer_scale_2(self._ff_block(self.norm2(x))) - else: - x = self.norm1(x + self.layer_scale_1( - self._sa_block(x, src_mask, src_key_padding_mask))) - if cross_attention_src is not None: - x = self.norm_cross( - x + self.layer_scale_cross( - self._cross_attention_block(src, cross_attention_src))) - x = self.norm2(x + self.layer_scale_2(self._ff_block(x))) - return x - - -class StreamingTransformer(StreamingModule): - """Transformer with Streaming / Causal support. - - Args: - d_model (int): Dimension of the data. - num_heads (int): Number of heads. - dim_feedforward (int): Intermediate dimension of FF module. - dropout (float): Dropout both for MHA and FF. - bias_ff (bool): Use bias for FF. - bias_attn (bool): Use bias for MHA. - causal (bool): Causal mask applied automatically. - past_context (int or None): Receptive field for the causal mask, infinite if None. - custom (bool): Use custom MHA implementation, for testing / benchmarking. - memory_efficient (bool): Use xformers based memory efficient attention. - attention_as_float32 (bool): Perform the attention as float32 - (especially important with memory_efficient as autocast won't do this automatically). - cross_attention (bool): If True, expect to get secondary input for cross-attention. - layer_scale (float or None): If not None, LayerScale will be used - with the given value as initial scale. - positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope). - max_period (float): Maximum period of the time embedding. - positional_scale (float): Scale of positional embedding, set to 0 to deactivate. - xpos (bool): Apply xpos exponential decay to positional embedding (rope only). - lr (float or None): learning rate override through the `make_optim_group` API. 
- weight_decay (float or None): Weight_decay override through the `make_optim_group` API. - layer_class: (subclass of `StreamingTransformerLayer): class to use - to initialize the layers, allowing further customization outside of Audiocraft. - checkpointing (str): Checkpointing strategy to reduce memory usage. - No checkpointing if set to 'none'. Per layer checkpointing using PyTorch - if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice, - minimal memory usage, but maximal runtime). Finally, `xformers_default` provide - a policy for opting-out some operations of the checkpointing like - linear layers and attention, providing a middle ground between speed and memory. - device (torch.device or None): Device on which to initialize. - dtype (torch.dtype or None): dtype to use. - **kwargs: See `nn.TransformerEncoderLayer`. - """ - def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048, - dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True, - causal: bool = False, past_context: tp.Optional[int] = None, - custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False, - cross_attention: bool = False, layer_scale: tp.Optional[float] = None, - positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1., - xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None, - layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer, - checkpointing: str = 'none', device=None, dtype=None, **kwargs): - super().__init__() - assert d_model % num_heads == 0 - - self.positional_embedding = positional_embedding - self.max_period = max_period - self.positional_scale = positional_scale - self.weight_decay = weight_decay - self.lr = lr - - assert positional_embedding in ['sin', 'rope', 'sin_rope'] - self.rope: tp.Optional[RotaryEmbedding] = None - if self.positional_embedding in ['rope', 'sin_rope']: - assert _is_custom(custom, memory_efficient) - self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period, - xpos=xpos, scale=positional_scale, device=device) - - self.checkpointing = checkpointing - - assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm'] - if self.checkpointing.startswith('xformers'): - _verify_xformers_internal_compat() - - self.layers = nn.ModuleList() - for idx in range(num_layers): - self.layers.append( - layer_class( - d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward, - dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn, - causal=causal, past_context=past_context, custom=custom, - memory_efficient=memory_efficient, attention_as_float32=attention_as_float32, - cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope, - device=device, dtype=dtype, **kwargs)) - - if self.checkpointing != 'none': - for layer in self.layers: - # see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the - # backward hook inside of FSDP... 
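# (Tagging each layer below lets the FSDP wrapper in audiocraft/optim/fsdp.py
# locate the activation-checkpointed modules whose backward hook it has to fix;
# with 'torch' checkpointing the whole layer forward is re-run during backward,
# while the 'xformers_*' policies in _apply_layer save the listed ops instead of
# recomputing them.)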
- layer._magma_checkpointed = True # type: ignore - assert layer.layer_drop == 0., "Need further checking" # type: ignore - - def _apply_layer(self, layer, *args, **kwargs): - method = self.checkpointing - if method == 'none': - return layer(*args, **kwargs) - elif method == 'torch': - return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs) - elif method.startswith('xformers'): - from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy - if method == 'xformers_default': - # those operations will be saved, and not recomputed. - # According to Francisco we can get smarter policies but this is a good start. - allow_list = [ - "xformers.efficient_attention_forward_cutlass.default", - "xformers_flash.flash_fwd.default", - "aten.addmm.default", - "aten.mm.default", - ] - elif method == 'xformers_mm': - # those operations will be saved, and not recomputed. - # According to Francisco we can get smarter policies but this is a good start. - allow_list = [ - "aten.addmm.default", - "aten.mm.default", - ] - else: - raise ValueError(f"xformers checkpointing xformers policy {method} is not known.") - policy_fn = _get_default_policy(allow_list) - return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs) - else: - raise ValueError(f"Checkpointing method {method} is unknown.") - - def forward(self, x: torch.Tensor, *args, **kwargs): - B, T, C = x.shape - - if 'offsets' in self._streaming_state: - offsets = self._streaming_state['offsets'] - else: - offsets = torch.zeros(B, dtype=torch.long, device=x.device) - - if self.positional_embedding in ['sin', 'sin_rope']: - positions = torch.arange(T, device=x.device).view(1, -1, 1) - positions = positions + offsets.view(-1, 1, 1) - pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype) - x = x + self.positional_scale * pos_emb - - for layer in self.layers: - x = self._apply_layer(layer, x, *args, **kwargs) - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return x - - def make_optim_group(self): - group = {"params": list(self.parameters())} - if self.lr is not None: - group["lr"] = self.lr - if self.weight_decay is not None: - group["weight_decay"] = self.weight_decay - return group - - -# special attention attention related function - -def _verify_xformers_memory_efficient_compat(): - try: - from xformers.ops import memory_efficient_attention, LowerTriangularMask # noqa - except ImportError: - raise ImportError( - "xformers is not installed. Please install it and try again.\n" - "To install on AWS and Azure, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" - "To install on FAIR Cluster, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") - - -def _verify_xformers_internal_compat(): - try: - from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy # noqa - except ImportError: - raise ImportError( - "Francisco's fairinternal xformers is not installed. 
Please install it and try again.\n" - "To install on AWS and Azure, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n" - "To install on FAIR Cluster, run \n" - "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n" - "pip install -U git+https://git@github.com/fairinternal/xformers.git#egg=xformers\n") - - -def _is_custom(custom: bool, memory_efficient: bool): - return custom or memory_efficient diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/+page.server.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/+page.server.ts deleted file mode 100644 index 28692b5304687ce69551c5015d71a4419069415a..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/login/+page.server.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { redirect } from "@sveltejs/kit"; -import { getOIDCAuthorizationUrl } from "$lib/server/auth"; -import { base } from "$app/paths"; - -export const actions = { - default: async function ({ url, locals, request }) { - // TODO: Handle errors if provider is not responding - const referer = request.headers.get("referer"); - const authorizationUrl = await getOIDCAuthorizationUrl( - { redirectURI: `${(referer ? new URL(referer) : url).origin}${base}/login/callback` }, - { sessionId: locals.sessionId } - ); - - throw redirect(303, authorizationUrl); - }, -}; diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/phind.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/phind.py deleted file mode 100644 index 70525d51d849c43bd1cf29c7f9b18f22bff1e982..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helpers/phind.py +++ /dev/null @@ -1,69 +0,0 @@ -import sys -import json -import datetime -import urllib.parse - -from curl_cffi import requests - -config = json.loads(sys.argv[1]) -prompt = config['messages'][-1]['content'] - -skill = 'expert' if config['model'] == 'gpt-4' else 'intermediate' - -json_data = json.dumps({ - 'question': prompt, - 'options': { - 'skill': skill, - 'date': datetime.datetime.now().strftime('%d/%m/%Y'), - 'language': 'en', - 'detailed': True, - 'creative': True, - 'customLinks': []}}, separators=(',', ':')) - -headers = { - 'Content-Type': 'application/json', - 'Pragma': 'no-cache', - 'Accept': '*/*', - 'Sec-Fetch-Site': 'same-origin', - 'Accept-Language': 'en-GB,en;q=0.9', - 'Cache-Control': 'no-cache', - 'Sec-Fetch-Mode': 'cors', - 'Content-Length': str(len(json_data)), - 'Origin': 'https://www.phind.com', - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15', - 'Referer': f'https://www.phind.com/search?q={urllib.parse.quote(prompt)}&source=searchbox', - 'Connection': 'keep-alive', - 'Host': 'www.phind.com', - 'Sec-Fetch-Dest': 'empty' -} - - -def output(chunk): - try: - if b'PHIND_METADATA' in chunk: - return - - if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n': - chunk = b'data: \n\r\n\r\n' - - chunk = chunk.decode() - - chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n') - chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n') - chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '') - - print(chunk, flush=True, end = '') - - except json.decoder.JSONDecodeError: - pass - -while True: - try: - response = requests.post('https://www.phind.com/api/infer/answer', - headers=headers, data=json_data, content_callback=output, timeout=999999, 
impersonate='safari15_5') - - exit(0) - - except Exception as e: - print('an error occured, retrying... |', e, flush=True) - continue \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/utils/ClearChildren.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/utils/ClearChildren.js deleted file mode 100644 index 6a527b2be5b32c08db23656af96d16ed637bd0a5..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/utils/ClearChildren.js +++ /dev/null @@ -1,29 +0,0 @@ -import Container from '../../container/Container.js'; - -const ContainerClear = Container.prototype.clear; - -var ClearChildren = function (destroyChild) { - if (this.backgroundChildren) { - this.backgroundChildren.length = 0; - } - - var fireRemoveEvent = !destroyChild && this.sizerEventsEnable; - var children; - if (fireRemoveEvent) { - children = this.getChildren([]); - } - - ContainerClear.call(this, destroyChild); - - if (fireRemoveEvent) { - var gameObject; - for (var i = 0, cnt = children.length; i < cnt; i++) { - gameObject = children[i]; - gameObject.emit('sizer.remove', gameObject, this); - this.emit('remove', gameObject, this); - } - } - return this; -} - -export default ClearChildren; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Slider.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Slider.d.ts deleted file mode 100644 index f94fe29277b5b7f94748fa5a55ab54e0914b1b20..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/slider/Slider.d.ts +++ /dev/null @@ -1,58 +0,0 @@ -// import * as Phaser from 'phaser'; -import Sizer from '../sizer/Sizer'; -import RoundRecrangle from '../../../plugins/roundrectangle'; - - -export default Slider; - -declare namespace Slider { - - type InputTypes = 0 | 1 | -1 | 'drag' | 'pan' | 'click' | 'none'; - - interface IConfig extends Sizer.IConfig { - reverseAxis?: boolean, - background?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig, - track?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig, - indicator?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig, - thumb?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig, - - input?: InputTypes, - - gap?: number, - - value?: number, - min?: number, max?: number, - - easeValue?: { - duration?: number, - ease?: string - }, - - valuechangeCallback: (newValue: number, oldValue: number, slider: Slider) => void, - - enable?: boolean, - } -} - -declare class Slider extends Sizer { - constructor( - scene: Phaser.Scene, - config?: Slider.IConfig - ); - - value: number; - getValue(min?: number, max?: number): number; - setValue(value?: number, min?: number, max?: number): this; - addValue(inc?: number, min?: number, max?: number): this; - - easeValueTo(value?: number, min?: number, max?: number): this; - stopEaseValue(): this; - setEaseValueDuration(duration: number): this; - setEaseValueFunction(ease: string): this; - - setGap(gap?: number, min?: number, max?: number): this; - gap: number; - - setEnable(enable?: boolean): this; - enable: boolean; -} \ No newline at end of file diff --git a/spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/utilities.pm b/spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/utilities.pm deleted file mode 100644 index 
7be117449190533d826bd63b9266c1434d00408f..0000000000000000000000000000000000000000 --- a/spaces/Akmyradov/TurkmenTTSweSTT/uroman/lib/NLP/utilities.pm +++ /dev/null @@ -1,3652 +0,0 @@ -################################################################ -# # -# utilities # -# # -################################################################ - -package NLP::utilities; - -use File::Spec; -use Time::HiRes qw(time); -use Time::Local; -use NLP::English; -use NLP::UTF8; - -$utf8 = NLP::UTF8; -$englishPM = NLP::English; - -%empty_ht = (); - -use constant DEBUGGING => 0; - -sub member { - local($this,$elem,@array) = @_; - - my $a; - if (defined($elem)) { - foreach $a (@array) { - if (defined($a)) { - return 1 if $elem eq $a; - } else { - $DB::single = 1; # debugger breakpoint - print STDERR "\nWarning: Undefined variable utilities::member::a\n"; - } - } - } else { - $DB::single = 1; # debugger breakpoint - print STDERR "\nWarning: Undefined variable utilities::member::elem\n"; - } - return 0; -} - -sub dual_member { - local($this,$elem1,$elem2,*array1,*array2) = @_; - # returns 1 if there exists a position $n - # such that $elem1 occurs at position $n in @array1 - # and $elem2 occurs at same position $n in @array2 - - return 0 unless defined($elem1) && defined($elem2); - my $last_index = ($#array1 < $#array2) ? $#array1 : $#array2; #min - my $a; - my $b; - foreach $i ((0 .. $last_index)) { - return 1 if defined($a = $array1[$i]) && defined($b = $array2[$i]) && ($a eq $elem1) && ($b eq $elem2); - } - return 0; -} - -sub sorted_list_equal { - local($this,*list1,*list2) = @_; - - return 0 unless $#list1 == $#list2; - foreach $i ((0 .. $#list1)) { - return 0 unless $list1[$i] eq $list2[$i]; - } - return 1; -} - -sub trim { - local($this, $s) = @_; - - $s =~ s/^\s*//; - $s =~ s/\s*$//; - $s =~ s/\s+/ /g; - return $s; -} - -sub trim2 { - local($this, $s) = @_; - - $s =~ s/^\s*//; - $s =~ s/\s*$//; - return $s; -} - -sub trim_left { - local($this, $s) = @_; - $s =~ s/^\s*//; - return $s; -} - -sub cap_member { - local($this,$elem,@array) = @_; - - my $a; - my $lc_elem = lc $elem; - foreach $a (@array) { - return $a if $lc_elem eq lc $a; - } - return ""; -} - -sub remove_elem { - local($this,$elem,@array) = @_; - - return @array unless $this->member($elem, @array); - @rm_list = (); - foreach $a (@array) { - push(@rm_list, $a) unless $elem eq $a; - } - return @rm_list; -} - -sub intersect_p { - local($this,*list1,*list2) = @_; - - foreach $elem1 (@list1) { - if (defined($elem1)) { - foreach $elem2 (@list2) { - if (defined($elem2)) { - return 1 if $elem1 eq $elem2; - } else { - $DB::single = 1; # debugger breakpoint - print STDERR "\nWarning: Undefined variable utilities::intersect_p::elem2\n"; - } - } - } else { - $DB::single = 1; # debugger breakpoint - print STDERR "\nWarning: Undefined variable utilities::intersect_p::elem1\n"; - } - } - return 0; -} - -sub intersect_expl_p { - local($this,*list1,@list2) = @_; - - foreach $elem1 (@list1) { - foreach $elem2 (@list2) { - return 1 if $elem1 eq $elem2; - } - } - return 0; -} - -sub intersection { - local($this,*list1,*list2) = @_; - - @intersection_list = (); - foreach $elem1 (@list1) { - foreach $elem2 (@list2) { - push(@intersection_list, $elem1) if ($elem1 eq $elem2) && ! 
$this->member($elem1, @intersection_list); - } - } - return @intersection_list; -} - -sub cap_intersect_p { - local($this,*list1,*list2) = @_; - - foreach $elem1 (@list1) { - $lc_elem1 = lc $elem1; - foreach $elem2 (@list2) { - return 1 if $lc_elem1 eq lc $elem2; - } - } - return 0; -} - -sub subset_p { - local($this,*list1,*list2) = @_; - - foreach $elem1 (@list1) { - return 0 unless $this->member($elem1, @list2); - } - return 1; -} - -sub cap_subset_p { - local($this,*list1,*list2) = @_; - - foreach $elem1 (@list1) { - return 0 unless $this->cap_member($elem1, @list2); - } - return 1; -} - -sub unique { - local($this, @list) = @_; - - my %seen = (); - @uniq = (); - foreach $item (@list) { - push(@uniq, $item) unless $seen{$item}++; - } - return @uniq; -} - -sub position { - local($this,$elem,@array) = @_; - $i = 0; - foreach $a (@array) { - return $i if $elem eq $a; - $i++; - } - return -1; -} - -sub positions { - local($this,$elem,@array) = @_; - $i = 0; - @positions_in_list = (); - foreach $a (@array) { - push(@positions_in_list, $i) if $elem eq $a; - $i++; - } - return @positions_in_list; -} - -sub last_position { - local($this,$elem,@array) = @_; - - $result = -1; - $i = 0; - foreach $a (@array) { - $result = $i if $elem eq $a; - $i++; - } - return $result; -} - -sub rand_n_digit_number { - local($this,$n) = @_; - - return 0 unless $n =~ /^[1-9]\d*$/; - $ten_power_n = 10 ** ($n - 1); - return int(rand(9 * $ten_power_n)) + $ten_power_n; -} - -# Consider File::Temp -sub new_tmp_filename { - local($this,$filename) = @_; - - $loop_limit = 1000; - ($dir,$simple_filename) = ($filename =~ /^(.+)\/([^\/]+)$/); - $simple_filename = $filename unless defined($simple_filename); - $new_filename = "$dir/tmp-" . $this->rand_n_digit_number(8) . "-$simple_filename"; - while ((-e $new_filename) && ($loop_limit-- >= 0)) { - $new_filename = "$dir/tmp-" . $this->rand_n_digit_number(8) . 
"-$simple_filename"; - } - return $new_filename; -} - -# support sorting order: "8", "8.0", "8.5", "8.5.1.", "8.10", "10", "10-12" - -sub compare_complex_numeric { - local($this,$a,$b) = @_; - - (my $a_num,my $a_rest) = ($a =~ /^(\d+)\D*(.*)$/); - (my $b_num,my $b_rest) = ($b =~ /^(\d+)\D*(.*)$/); - - if (defined($a_rest) && defined($b_rest)) { - return ($a_num <=> $b_num) - || $this->compare_complex_numeric($a_rest,$b_rest); - } else { - return $a cmp $b; - } -} - -# support sorting order: "lesson8-ps-v1.9.xml", "Lesson 10_ps-v_1.11.xml" -# approach: segment strings into alphabetic and numerical sections and compare pairwise - -sub compare_mixed_alpha_numeric { - local($this,$a,$b) = @_; - - ($a_alpha,$a_num,$a_rest) = ($a =~ /^(\D*)(\d[-\d\.]*)(.*)$/); - ($b_alpha,$b_num,$b_rest) = ($b =~ /^(\D*)(\d[-\d\.]*)(.*)$/); - - ($a_alpha) = ($a =~ /^(\D*)/) unless defined $a_alpha; - ($b_alpha) = ($b =~ /^(\D*)/) unless defined $b_alpha; - - # ignore non-alphabetic characters in alpha sections - $a_alpha =~ s/\W|_//g; - $b_alpha =~ s/\W|_//g; - - if ($alpha_cmp = lc $a_alpha cmp lc $b_alpha) { - return $alpha_cmp; - } elsif (defined($a_rest) && defined($b_rest)) { - return $this->compare_complex_numeric($a_num,$b_num) - || $this->compare_mixed_alpha_numeric ($a_rest,$b_rest); - } else { - return (defined($a_num) <=> defined($b_num)) || ($a cmp $b); - } -} - -# @sorted_lessons = sort { NLP::utilities->compare_mixed_alpha_numeric($a,$b) } @lessons; - -sub html_guarded_p { - local($this,$string) = @_; - - return 0 if $string =~ /[<>"]/; - $string .= " "; - @segs = split('&',$string); - shift @segs; - foreach $seg (@segs) { - next if $seg =~ /^[a-z]{2,6};/i; - # next if $seg =~ /^amp;/; - # next if $seg =~ /^quot;/; - # next if $seg =~ /^nbsp;/; - # next if $seg =~ /^gt;/; - # next if $seg =~ /^lt;/; - next if $seg =~ /^#(\d+);/; - next if $seg =~ /^#x([0-9a-fA-F]+);/; - return 0; - } - return 1; -} - -sub guard_tooltip_text { - local($this,$string) = @_; - - $string =~ s/\xCB\x88/'/g; - return $string; -} - -sub guard_html { - local($this,$string,$control_string) = @_; - - return "" unless defined($string); - my $guarded_string; - $control_string = "" unless defined($control_string); - return $string if ($string =~ /&/) - && (! ($control_string =~ /\bstrict\b/)) - && $this->html_guarded_p($string); - $guarded_string = $string; - $guarded_string =~ s/&/&/g; - if ($control_string =~ /slash quote/) { - $guarded_string =~ s/"/\\"/g; - } elsif ($control_string =~ /keep quote/) { - } else { - $guarded_string =~ s/\"/"/g; - } - if ($control_string =~ /escape-slash/) { - $guarded_string =~ s/\//&x2F;/g; - } - $guarded_string =~ s/>/>/g; - $guarded_string =~ s/" : - /^lt$/i ? "<" : - /^x2F$/i ? "/" : - /^nbsp$/i ? "\xC2\xA0" : - /^#(\d+)$/ ? $this->chr($1) : - /^#x([0-9a-f]+)$/i ? $this->chr(hex($1)) : - $_ - }gex; - return $string; -} - -sub unguard_html_r { - local($this,$string) = @_; - - return undef unless defined($string); - - $string =~ s/&/&/g; - $string =~ s/"/'/g; - $string =~ s/<//g; - - ($d) = ($string =~ /&#(\d+);/); - while (defined($d)) { - $c = $this->chr($d); - $string =~ s/&#$d;/$c/g; - ($d) = ($string =~ /&#(\d+);/); - } - ($x) = ($string =~ /&#x([0-9a-f]+);/i); - while (defined($x)) { - $c = $this->chr(hex($x)); - $string =~ s/&#x$x;/$c/g; - ($x) = ($string =~ /&#x([0-9a-f]+);/i); - } - $string0 = $string; - ($x) = ($string =~ /(?:https?|www|\.com)\S*\%([0-9a-f]{2,2})/i); - while (defined($x)) { - $c = $this->chr("%" . 
hex($x)); - $string =~ s/\%$x/$c/g; - ($x) = ($string =~ /(?:https?|www|\.com)\S*\%([0-9a-f]{2,2})/i); - } - return $string; -} - -sub unguard_html_l { - local($caller,$string) = @_; - - return undef unless defined($string); - - my $pre; - my $core; - my $post; - my $repl; - my $s = $string; - if (($pre,$core,$post) = ($s =~ /^(.*)&(amp|quot|lt|gt|#\d+|#x[0-9a-f]+);(.*)$/i)) { - $repl = "?"; - $repl = "&" if $core =~ /^amp$/i; - $repl = "'" if $core =~ /^quot$/i; - $repl = "<" if $core =~ /^lt$/i; - $repl = ">" if $core =~ /^gt$/i; - if ($core =~ /^#\d+$/i) { - $core2 = substr($core,1); - $repl = $caller->chr($core2); - } - $repl = $caller->chr(hex(substr($core,2))) if $core =~ /^#x[0-9a-f]+$/i; - $s = $pre . $repl . $post; - } - return $s; -} - -sub guard_html_quote { - local($caller,$string) = @_; - - $string =~ s/"/"/g; - return $string; -} - -sub unguard_html_quote { - local($caller,$string) = @_; - - $string =~ s/"/"/g; - return $string; -} - -sub uri_encode { - local($caller,$string) = @_; - - $string =~ s/([^^A-Za-z0-9\-_.!~*()'])/ sprintf "%%%02x", ord $1 /eg; - return $string; -} - -sub uri_decode { - local($caller,$string) = @_; - - $string =~ s/%([0-9A-Fa-f]{2})/chr(hex($1))/eg; - return $string; -} - -sub remove_xml_tags { - local($caller,$string) = @_; - - $string =~ s/<\/?[a-zA-Z][-_:a-zA-Z0-9]*(\s+[a-zA-Z][-_:a-zA-Z0-9]*=\"[^"]*\")*\s*\/?>//g; - return $string; -} - -sub remove_any_tokenization_at_signs_around_xml_tags { - local($caller,$string) = @_; - - $string =~ s/(?:\@ \@)?(<[^<>]+>)(?:\@ \@)?/$1/g; - $string =~ s/\@?(<[^<>]+>)\@?/$1/g; - return $string; -} - -sub remove_xml_tags_and_any_bordering_at_signs { - # at-signs from tokenization - local($caller,$string) = @_; - - $string =~ s/\@?<\/?[a-zA-Z][-_:a-zA-Z0-9]*(\s+[a-zA-Z][-_:a-zA-Z0-9]*=\"[^"]*\")*\s*\/?>\@?//g; - return $string; -} - -sub chr { - local($caller,$i) = @_; - - return undef unless $i =~ /^\%?\d+$/; - if ($i =~ /^%/) { - $i =~ s/^\%//; - return chr($i) if $i < 128; - return "\x80" | chr($i - 128) if $i < 256; - } else { - return chr($i) if $i < 128; - return ("\xC0" | chr(($i / 64) % 32)) - . ("\x80" | chr($i % 64)) if $i < 2048; - return ("\xE0" | chr(int($i / 4096) % 16)) - . ("\x80" | chr(int($i / 64) % 64)) - . ("\x80" | chr($i % 64)) if $i < 65536; - return ("\xF0" | chr(int($i / 262144) % 8)) - . ("\x80" | chr(int($i / 4096) % 64)) - . ("\x80" | chr(int($i / 64) % 64)) - . ("\x80" | chr($i % 64)) if $i < 2097152; - } - return "?"; -} - -sub guard_cgi { - local($caller, $string) = @_; - - $guarded_string = $string; - if ($string =~ /[\x80-\xFF]/) { - $guarded_string = ""; - while ($string ne "") { - $char = substr($string, 0, 1); - $string = substr($string, 1); - if ($char =~ /^[\\ ;\#\&\:\=\"\'\+\?\x00-\x1F\x80-\xFF]$/) { - $hex = sprintf("%2.2x",ord($char)); - $guarded_string .= uc "%$hex"; - } else { - $guarded_string .= $char; - } - } - } else { - $guarded_string = $string; - $guarded_string =~ s/%/%25/g; - $guarded_string =~ s/\n/%5Cn/g; - $guarded_string =~ s/\t/%5Ct/g; - $guarded_string =~ s/ /%20/g; - $guarded_string =~ s/"/%22/g; - $guarded_string =~ s/#/%23/g; - $guarded_string =~ s/&/%26/g; - $guarded_string =~ s/'/%27/g; - $guarded_string =~ s/\+/%2B/g; - $guarded_string =~ s/\//%2F/g; - $guarded_string =~ s/:/%3A/g; - $guarded_string =~ s/;/%3B/g; - $guarded_string =~ s//%3E/g; - $guarded_string =~ s/\?/%3F/g; - } - return $guarded_string; -} - -sub repair_cgi_guard { - local($caller,$string) = @_; - # undo second cgi-guard, e.g. 
"Jo%25C3%25ABlle_Aubron" -> "Jo%C3%ABlle_Aubron" - - $string =~ s/(%)25([CD][0-9A-F]%)25([89AB][0-9A-F])/$1$2$3/g; - $string =~ s/(%)25(E[0-9A-F]%)25([89AB][0-9A-F]%)25([89AB][0-9A-F])/$1$2$3$4/g; - return $string; -} - -sub unguard_cgi { - local($caller,$string) = @_; - - $unguarded_string = $string; - $unguarded_string =~ s/%5Cn/\n/g; - $unguarded_string =~ s/%5Ct/\t/g; - $unguarded_string =~ s/%20/ /g; - $unguarded_string =~ s/%23/#/g; - $unguarded_string =~ s/%26/&/g; - $unguarded_string =~ s/%2B/+/g; - $unguarded_string =~ s/%2C/,/g; - $unguarded_string =~ s/%3A/:/g; - $unguarded_string =~ s/%3D/=/g; - $unguarded_string =~ s/%3F/?/g; - $unguarded_string =~ s/%C3%A9/\xC3\xA9/g; - - # more general - ($code) = ($unguarded_string =~ /%([0-9A-F]{2,2})/); - while (defined($code)) { - $percent_code = "%" . $code; - $hex_code = sprintf("%c", hex($code)); - $unguarded_string =~ s/$percent_code/$hex_code/g; - ($code) = ($unguarded_string =~ /%([0-9A-F]{2,2})/); - } - - return $unguarded_string; -} - -sub regex_guard { - local($caller,$string) = @_; - - $guarded_string = $string; - $guarded_string =~ s/([\\\/\^\|\(\)\{\}\$\@\*\+\?\.\[\]])/\\$1/g - if $guarded_string =~ /[\\\/\^\|\(\)\{\}\$\@\*\+\?\.\[\]]/; - - return $guarded_string; -} - -sub g_regex_spec_tok_p { - local($this,$string) = @_; - - # specials: ( ) (?: ) [ ] - return ($string =~ /^(\(\?:|[()\[\]])$/); -} - -sub regex_guard_norm { - local($this,$string) = @_; - - return $string unless $string =~ /[\[\]\\()$@?+]/; - my $rest = $string; - my @stack = (""); - while ($rest ne "") { - # specials: ( ) (?: ) [ ] ? + - if (($pre, $special, $post) = ($rest =~ /^((?:\\.|[^\[\]()?+])*)(\(\?:|[\[\]()?+])(.*)$/)) { - # print STDERR "Special: $pre *$special* $post\n"; - unless ($pre eq "") { - push(@stack, $pre); - while (($#stack >= 1) && (! $this->g_regex_spec_tok_p($stack[$#stack-1])) - && (! $this->g_regex_spec_tok_p($stack[$#stack]))) { - $s1 = pop @stack; - $s2 = pop @stack; - push(@stack, "$s2$s1"); - } - } - if ($special =~ /^[?+]$/) { - push(@stack, "\\") if ($stack[$#stack] eq "") - || ($this->g_regex_spec_tok_p($stack[$#stack]) && ($stack[$#stack] ne "[")); - push(@stack, $special); - } elsif ($special eq "]") { - if (($#stack >= 1) && ($stack[$#stack-1] eq "[") && ! $this->g_regex_spec_tok_p($stack[$#stack])) { - $char_expression = pop @stack; - pop @stack; - push(@stack, "[$char_expression]"); - } else { - push(@stack, $special); - } - } elsif (($special =~ /^[()]/) && (($stack[$#stack] eq "[") - || (($#stack >= 1) - && ($stack[$#stack-1] eq "[") - && ! $this->g_regex_spec_tok_p($stack[$#stack])))) { - push(@stack, "\\$special"); - } elsif ($special eq ")") { - if (($#stack >= 1) && ($stack[$#stack-1] =~ /^\((\?:)?$/) && ! $this->g_regex_spec_tok_p($stack[$#stack])) { - $alt_expression = pop @stack; - $open_para = pop @stack; - if ($open_para eq "(") { - push(@stack, "(?:$alt_expression)"); - } else { - push(@stack, "$open_para$alt_expression)"); - } - } else { - push(@stack, $special); - } - } else { - push(@stack, $special); - } - while (($#stack >= 1) && (! $this->g_regex_spec_tok_p($stack[$#stack-1])) - && (! $this->g_regex_spec_tok_p($stack[$#stack]))) { - $s1 = pop @stack; - $s2 = pop @stack; - push(@stack, "$s2$s1"); - } - $rest = $post; - } else { - push(@stack, $rest); - $rest = ""; - } - } - # print STDERR "Stack: " . join(";", @stack) . "\n"; - foreach $i ((0 .. $#stack)) { - $stack_elem = $stack[$i]; - if ($stack_elem =~ /^[()\[\]]$/) { - $stack[$i] = "\\" . 
$stack[$i]; - } - } - return join("", @stack); -} - -sub string_guard { - local($caller,$string) = @_; - - return "" unless defined($string); - $guarded_string = $string; - $guarded_string =~ s/([\\"])/\\$1/g - if $guarded_string =~ /[\\"]/; - - return $guarded_string; -} - -sub json_string_guard { - local($caller,$string) = @_; - - return "" unless defined($string); - $guarded_string = $string; - $guarded_string =~ s/([\\"])/\\$1/g - if $guarded_string =~ /[\\"]/; - $guarded_string =~ s/\r*\n/\\n/g - if $guarded_string =~ /\n/; - - return $guarded_string; -} - -sub json_string_unguard { - local($caller,$string) = @_; - - return "" unless defined($string); - $string =~ s/\\n/\n/g - if $string =~ /\\n/; - return $string; -} - -sub guard_javascript_arg { - local($caller,$string) = @_; - - return "" unless defined($string); - $guarded_string = $string; - $guarded_string =~ s/\\/\\\\/g; - $guarded_string =~ s/'/\\'/g; - return $guarded_string; -} - -sub guard_substitution_right_hand_side { - # "$1x" => "$1 . \"x\"" - local($caller,$string) = @_; - - my $result = ""; - ($pre,$var,$post) = ($string =~ /^([^\$]*)(\$\d)(.*)$/); - while (defined($var)) { - $result .= " . " if $result; - $result .= "\"$pre\" . " unless $pre eq ""; - $result .= $var; - $string = $post; - ($pre,$var,$post) = ($string =~ /^([^\$]*)(\$\d)(.*)$/); - } - $result .= " . \"$string\"" if $string; - return $result; -} - -sub string_starts_with_substring { - local($caller,$string,$substring) = @_; - - $guarded_substring = $caller->regex_guard($substring); - return $string =~ /^$guarded_substring/; -} - -sub one_string_starts_with_the_other { - local($caller,$s1,$s2) = @_; - - return ($s1 eq $s2) - || $caller->string_starts_with_substring($s1,$s2) - || $caller->string_starts_with_substring($s2,$s1); -} - -sub string_ends_in_substring { - local($caller,$string,$substring) = @_; - - $guarded_substring = $caller->regex_guard($substring); - return $string =~ /$guarded_substring$/; -} - -sub string_equal_ignore_leading_multiple_or_trailing_blanks { - local($caller,$string1,$string2) = @_; - - return 1 if $string1 eq $string2; - $string1 =~ s/\s+/ /; - $string2 =~ s/\s+/ /; - $string1 =~ s/^\s+//; - $string2 =~ s/^\s+//; - $string1 =~ s/\s+$//; - $string2 =~ s/\s+$//; - - return $string1 eq $string2; -} - -sub strip_substring_from_start_of_string { - local($caller,$string,$substring,$error_code) = @_; - - $error_code = "ERROR" unless defined($error_code); - my $reg_surf = $caller->regex_guard($substring); - if ($string =~ /^$guarded_substring/) { - $string =~ s/^$reg_surf//; - return $string; - } else { - return $error_code; - } -} - -sub strip_substring_from_end_of_string { - local($caller,$string,$substring,$error_code) = @_; - - $error_code = "ERROR" unless defined($error_code); - my $reg_surf = $caller->regex_guard($substring); - if ($string =~ /$reg_surf$/) { - $string =~ s/$reg_surf$//; - return $string; - } else { - return $error_code; - } -} - -# to be deprecated -sub lang_code { - local($caller,$language) = @_; - - $langPM = NLP::Language->new(); - return $langPM->lang_code($language); -} - -sub full_language { - local($caller,$lang_code) = @_; - - return "Arabic" if $lang_code eq "ar"; - return "Chinese" if $lang_code eq "zh"; - return "Czech" if $lang_code eq "cs"; - return "Danish" if $lang_code eq "da"; - return "Dutch" if $lang_code eq "nl"; - return "English" if $lang_code eq "en"; - return "Finnish" if $lang_code eq "fi"; - return "French" if $lang_code eq "fr"; - return "German" if $lang_code eq "de"; - return 
"Greek" if $lang_code eq "el"; - return "Hebrew" if $lang_code eq "he"; - return "Hindi" if $lang_code eq "hi"; - return "Hungarian" if $lang_code eq "hu"; - return "Icelandic" if $lang_code eq "is"; - return "Indonesian" if $lang_code eq "id"; - return "Italian" if $lang_code eq "it"; - return "Japanese" if $lang_code eq "ja"; - return "Kinyarwanda" if $lang_code eq "rw"; - return "Korean" if $lang_code eq "ko"; - return "Latin" if $lang_code eq "la"; - return "Malagasy" if $lang_code eq "mg"; - return "Norwegian" if $lang_code eq "no"; - return "Pashto" if $lang_code eq "ps"; - return "Persian" if $lang_code eq "fa"; - return "Polish" if $lang_code eq "pl"; - return "Portuguese" if $lang_code eq "pt"; - return "Romanian" if $lang_code eq "ro"; - return "Russian" if $lang_code eq "ru"; - return "Spanish" if $lang_code eq "es"; - return "Swedish" if $lang_code eq "sv"; - return "Turkish" if $lang_code eq "tr"; - return "Urdu" if $lang_code eq "ur"; - return ""; -} - -# to be deprecated -sub short_lang_name { - local($caller,$lang_code) = @_; - - $langPM = NLP::Language->new(); - return $langPM->shortname($lang_code); -} - -sub ml_dir { - local($caller,$language,$type) = @_; - - $type = "MSB" unless defined($type); - $lang_code = $langPM->lang_code($language); - return $caller->ml_dir($lang_code, "lex") . "/corpora" if $type eq "corpora"; - return "" unless defined($rc); - $ml_home = $rc->ml_home_dir(); - return File::Spec->catfile($ml_home, "arabic") - if ($lang_code eq "ar-iq") && ! $caller->member(lc $type,"lex","onto","dict"); - $langPM = NLP::Language->new(); - $lexdir = $langPM->lexdir($lang_code); - return $lexdir if defined($lexdir); - return ""; -} - -sub language_lex_filename { - local($caller,$language,$type) = @_; - - $langPM = NLP::Language->new(); - if (($lang_code = $langPM->lang_code($language)) - && ($ml_dir = $caller->ml_dir($lang_code,$type)) - && ($norm_language = $caller->short_lang_name($lang_code))) { - return "$ml_dir/$norm_language-lex" if ($type eq "lex"); - return "$ml_dir/onto" if ($type eq "onto"); - return "$ml_dir/$norm_language-english-dict" if ($type eq "dict") && !($lang_code eq "en"); - return ""; - } else { - return ""; - } -} - -# filename_without_path is obsolete - replace with -# use File::Basename; -# basename($filename) -sub filename_without_path { - local($caller,$filename) = @_; - - $filename =~ s/^.*\/([^\/]+)$/$1/; - return $filename; -} - -sub option_string { - local($caller,$input_name,$default,*values,*labels) = @_; - - my $s = ""; - return $s; -} - -sub pes_subseq_surf { - local($this,$start,$length,$langCode,@pes) = @_; - - my $surf = ""; - if ($start+$length-1 <= $#pes) { - foreach $i ($start .. $start + $length - 1) { - my $pe = $pes[$i]; - $surf .= $pe->get("surf",""); - $surf .= " " if $langCode =~ /^(ar|en|fr)$/; - } - } - $surf =~ s/\s+$//; - return $surf; -} - -sub copyList { - local($this,@list) = @_; - - @copy_list = (); - foreach $elem (@list) { - push(@copy_list,$elem); - } - return @copy_list; -} - -sub list_with_same_elem { - local($this,$size,$elem) = @_; - - @list = (); - foreach $i (0 .. 
$size-1) { - push(@list,$elem); - } - return @list; -} - -sub count_occurrences { - local($this,$s,$substring) = @_; - - $occ = 0; - $new = $s; - $guarded_substring = $this->regex_guard($substring); - $new =~ s/$guarded_substring//; - while ($new ne $s) { - $occ++; - $s = $new; - $new =~ s/$guarded_substring//; - } - return $occ; -} - -sub position_of_nth_occurrence { - local($this,$s,$substring,$occ) = @_; - - return -1 unless $occ > 0; - my $pos = 0; - while (($pos = index($s, $substring, $pos)) >= 0) { - return $pos if $occ == 1; - $occ--; - $pos = $pos + length($substring); - } - return -1; -} - -sub has_diff_elements_p { - local($this,@array) = @_; - - return 0 if $#array < 1; - $elem = $array[0]; - - foreach $a (@array) { - return 1 if $elem ne $a; - } - return 0; -} - -sub init_log { - local($this,$logfile, $control) = @_; - - $control = "" unless defined($control); - if ((DEBUGGING || ($control =~ /debug/i)) && $logfile) { - system("rm -f $logfile"); - system("date > $logfile; chmod 777 $logfile"); - } -} - -sub time_stamp_log { - local($this,$logfile, $control) = @_; - - $control = "" unless defined($control); - if ((DEBUGGING || ($control =~ /debug/i)) && $logfile) { - system("date >> $logfile; chmod 777 $logfile"); - } -} - -sub log { - local($this,$message,$logfile,$control) = @_; - - $control = "" unless defined($control); - if ((DEBUGGING || ($control =~ /debug/i)) && $logfile) { - $this->init_log($logfile, $control) unless -w $logfile; - if ($control =~ /timestamp/i) { - $this->time_stamp_log($logfile, $control); - } - $guarded_message = $message; - $guarded_message =~ s/"/\\"/g; - system("echo \"$guarded_message\" >> $logfile"); - } -} - -sub month_name_to_month_number { - local($this,$month_name) = @_; - - $month_name_init = lc substr($month_name,0,3); - return $this->position($month_name_init, "jan", "feb", "mar", "apr", "may", "jun", "jul", "aug", "sep", "oct", "nov", "dec") + 1; -} - -my @short_month_names = ("Jan.","Febr.","March","April","May","June","July","Aug.","Sept.","Oct.","Nov.","Dec."); -my @full_month_names = ("January","February","March","April","May","June","July","August","September","October","November","December"); - -sub month_number_to_month_name { - local($this,$month_number, $control) = @_; - - $month_number =~ s/^0//; - if ($month_number =~ /^([1-9]|1[0-2])$/) { - return ($control && ($control =~ /short/i)) - ? $short_month_names[$month_number-1] - : $full_month_names[$month_number-1]; - } else { - return ""; - } -} - -sub leap_year { - local($this,$year) = @_; - - return 0 if $year % 4 != 0; - return 1 if $year % 400 == 0; - return 0 if $year % 100 == 0; - return 1; -} - -sub datetime { - local($this,$format,$time_in_secs, $command) = @_; - - $command = "" unless defined($command); - $time_in_secs = time unless defined($time_in_secs) && $time_in_secs; - @time_vector = ($command =~ /\b(gm|utc)\b/i) ? 
gmtime($time_in_secs) : localtime($time_in_secs); - ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst)=@time_vector; - $thisyear = $year + 1900; - $thismon=(Jan,Feb,Mar,Apr,May,Jun,Jul,Aug,Sep,Oct,Nov,Dec)[$mon]; - $thismon2=("Jan.","Febr.","March","April","May","June","July","Aug.","Sept.","Oct.","Nov.","Dec.")[$mon]; - $thismonth = $mon + 1; - $thisday=(Sun,Mon,Tue,Wed,Thu,Fri,Sat)[$wday]; - $milliseconds = int(($time_in_secs - int($time_in_secs)) * 1000); - $date="$thisday $thismon $mday, $thisyear"; - $sdate="$thismon $mday, $thisyear"; - $dashedDate = sprintf("%04d-%02d-%02d",$thisyear,$thismonth,$mday); - $slashedDate = sprintf("%02d/%02d/%04d",$mday,$thismonth,$thisyear); - $time=sprintf("%02d:%02d:%02d",$hour,$min,$sec); - $shorttime=sprintf("%d:%02d",$hour,$min); - $shortdatetime = "$thismon2 $mday, $shorttime"; - - if ($date =~ /undefined/) { - return ""; - } elsif ($format eq "date at time") { - return "$date at $time"; - } elsif ($format eq "date") { - return "$date"; - } elsif ($format eq "sdate") { - return "$sdate"; - } elsif ($format eq "ddate") { - return "$dashedDate"; - } elsif ($format eq "time") { - return "$time"; - } elsif ($format eq "dateTtime+ms") { - return $dashedDate . "T" . $time . "." . $milliseconds; - } elsif ($format eq "dateTtime") { - return $dashedDate . "T" . $time; - } elsif ($format eq "yyyymmdd") { - return sprintf("%04d%02d%02d",$thisyear,$thismonth,$mday); - } elsif ($format eq "short date at time") { - return $shortdatetime; - } else { - return "$date at $time"; - } -} - -sub datetime_of_last_file_modification { - local($this,$format,$filename) = @_; - - return $this->datetime($format,(stat($filename))[9]); -} - -sub add_1sec { - local($this,$datetime) = @_; - - if (($year,$month,$day,$hour,$minute,$second) = ($datetime =~ /^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)$/)) { - $second++; - if ($second >= 60) { $second -= 60; $minute++; } - if ($minute >= 60) { $minute -= 60; $hour++; } - if ($hour >= 24) { $hour -= 24; $day++; } - if ($month =~ /^(01|03|05|07|08|10|12)$/) { - if ($day > 31) { $day -= 31; $month++; } - } elsif ($month =~ /^(04|06|09|11)$/) { - if ($day > 30) { $day -= 30; $month++; } - } elsif (($month eq "02") && $this->leap_year($year)) { - if ($day > 29) { $day -= 29; $month++; } - } elsif ($month eq "02") { - if ($day > 28) { $day -= 28; $month++; } - } - if ($month > 12) { $month -= 12; $year++; } - return sprintf("%04d-%02d-%02dT%02d:%02d:%02d", $year,$month,$day,$hour,$minute,$second); - } else { - return ""; - } -} - -sub stopwatch { - local($this, $function, $id, *ht, *OUT) = @_; - # function: start|stop|count|report; start|stop times are absolute (in secs.) 
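# Added usage sketch (editorial comment, not part of the original file): the calls
# below assume a bookkeeping hash %ht and an open log handle *LOG, mirroring the
# commented-out stopwatch calls found elsewhere in this module; the id strings are
# made-up examples.
#   $this->stopwatch("start", "romanize-snt", *ht, *LOG);  # begin timing for id "romanize-snt"
#   ... timed work ...
#   $this->stopwatch("end", "romanize-snt", *ht, *LOG);    # note: the code branches on "end", although the comment above says "stop"
#   $this->stopwatch("count", "romanize-call", *ht, *LOG); # increment a named counter
#   $this->stopwatch("report", "", *ht, *LOG);             # write per-id times and counts to the log handle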
- - my $current_time = time; - # print OUT "Point S stopwatch $function $id $current_time\n"; - if ($function eq "start") { - if ($ht{STOPWATCH_START}->{$id}) { - $ht{STOPWATCH_N_RESTARTS}->{$id} = ($ht{STOPWATCH_N_RESTARTS}->{$id} || 0) + 1; - } else { - $ht{STOPWATCH_START}->{$id} = $current_time; - } - } elsif ($function eq "end") { - if ($start_time = $ht{STOPWATCH_START}->{$id}) { - $ht{STOPWATCH_TIME}->{$id} = ($ht{STOPWATCH_TIME}->{$id} || 0) + ($current_time - $start_time); - $ht{STOPWATCH_START}->{$id} = ""; - } else { - $ht{STOPWATCH_N_DEAD_ENDS}->{$id} = ($ht{STOPWATCH_N_DEAD_ENDS}->{$id} || 0) + 1; - } - } elsif ($function eq "count") { - $ht{STOPWATCH_COUNT}->{$id} = ($ht{STOPWATCH_COUNT}->{$id} || 0) + 1; - } elsif ($function eq "report") { - my $id2; - foreach $id2 (keys %{$ht{STOPWATCH_START}}) { - if ($start_time = $ht{STOPWATCH_START}->{$id2}) { - $ht{STOPWATCH_TIME}->{$id2} = ($ht{STOPWATCH_TIME}->{$id2} || 0) + ($current_time - $start_time); - $ht{STOPWATCH_START}->{$id2} = $current_time; - } - } - print OUT "Time report:\n"; - foreach $id2 (sort { $ht{STOPWATCH_TIME}->{$b} <=> $ht{STOPWATCH_TIME}->{$a} } - keys %{$ht{STOPWATCH_TIME}}) { - my $stopwatch_time = $ht{STOPWATCH_TIME}->{$id2}; - $stopwatch_time = $this->round_to_n_decimal_places($stopwatch_time, 3); - my $n_restarts = $ht{STOPWATCH_N_RESTARTS}->{$id2}; - my $n_dead_ends = $ht{STOPWATCH_N_DEAD_ENDS}->{$id2}; - my $start_time = $ht{STOPWATCH_START}->{$id2}; - print OUT " $id2: $stopwatch_time seconds"; - print OUT " with $n_restarts restart(s)" if $n_restarts; - print OUT " with $n_dead_ends dead end(s)" if $n_dead_ends; - print OUT " (active)" if $start_time; - print OUT "\n"; - } - foreach $id2 (sort { $ht{STOPWATCH_COUNT}->{$b} <=> $ht{STOPWATCH_COUNT}->{$a} } - keys %{$ht{STOPWATCH_COUNT}}) { - $count = $ht{STOPWATCH_COUNT}->{$id2}; - print OUT " C $id2: $count\n"; - } - } -} - -sub print_html_banner { - local($this,$text,$bgcolor,*OUT,$control) = @_; - - $control = "" unless defined($control); - $bgcolor = "#BBCCFF" unless defined($bgcolor); - print OUT "
"; - print OUT "  " unless $text =~ /^\s*<(table|nobr)/; - print OUT $text; - print OUT "
\n"; - print OUT "
\n" unless $control =~ /nobr/i; -} - -sub print_html_head { - local($this, $title, *OUT, $control, $onload_fc, $add_javascript) = @_; - - $control = "" unless defined($control); - $onload_fc = "" unless defined($onload_fc); - $onload_clause = ($onload_fc) ? " onload=\"$onload_fc\"" : ""; - $add_javascript = "" unless defined($add_javascript); - $max_age_clause = ""; - $max_age_clause = ""; # if $control =~ /\bexp1hour\b/; - $css_clause = ""; - $css_clause = "\n " if $control =~ /css/; - $css_clause .= "\n " if $control =~ /css/; - $css_clause = "\n " if $control =~ /css-handheld/; - $icon_clause = ""; - $icon_clause .= "\n " if $control =~ /\bAMR\b/i; - $icon_clause .= "\n " if $control =~ /\bCRE\b/i; - print OUT "\xEF\xBB\xBF\n" unless $control =~ /\bno-bom\b/; # utf8 marker byte order mark - print OUT< - - - $max_age_clause - $title$css_clause$icon_clause -END_OF_HEADER1 -; - - unless ($control =~ /no javascript/) { - print OUT< - - -END_OF_HEADER2 -; - } - - print OUT< - -END_OF_HEADER3 -; -} - - -sub print_html_foot { - local($this, *OUT) = @_; - - print OUT " \n"; - print OUT "\n"; -} - -sub print_html_page { - local($this, *OUT, $s) = @_; - - print OUT "\xEF\xBB\xBF\n"; - print OUT "\n"; - print OUT " \n"; - print OUT " DEBUG\n"; - print OUT " \n"; - print OUT " \n"; - print OUT " \n"; - print OUT " \n"; - print OUT " $s\n"; - print OUT " \n"; - print OUT "\n"; -} - -sub http_catfile { - local($this, @path) = @_; - - $result = File::Spec->catfile(@path); - $result =~ s/(https?):\/([a-zA-Z])/$1:\/\/$2/; - return $result; -} - -sub underscore_to_space { - local($this, $s) = @_; - - return "" unless defined($s); - - $s =~ s/_+/ /g; - return $s; -} - -sub space_to_underscore { - local($this, $s) = @_; - - return "" unless defined($s); - - $s =~ s/ /_/g; - return $s; -} - -sub remove_spaces { - local($this, $s) = @_; - - $s =~ s/\s//g; - return $s; -} - -sub is_punctuation_string_p { - local($this, $s) = @_; - - return "" unless $s; - $s = $this->normalize_string($s) if $s =~ /[\x80-\xBF]/; - return $s =~ /^[-_,;:.?!\/\@+*"()]+$/; -} - -sub is_rare_punctuation_string_p { - local($this, $s) = @_; - - return 0 unless $s =~ /^[\x21-\x2F\x3A\x40\x5B-\x60\x7B-\x7E]{2,}$/; - return 0 if $s =~ /^(\.{2,3}|-{2,3}|\*{2,3}|::|\@?[-\/:]\@?)$/; - return 1; -} - -sub simplify_punctuation { - local($this, $s) = @_; - - $s =~ s/\xE2\x80\x92/-/g; - $s =~ s/\xE2\x80\x93/-/g; - $s =~ s/\xE2\x80\x94/-/g; - $s =~ s/\xE2\x80\x95/-/g; - $s =~ s/\xE2\x80\x98/`/g; - $s =~ s/\xE2\x80\x99/'/g; - $s =~ s/\xE2\x80\x9A/`/g; - $s =~ s/\xE2\x80\x9C/"/g; - $s =~ s/\xE2\x80\x9D/"/g; - $s =~ s/\xE2\x80\x9E/"/g; - $s =~ s/\xE2\x80\x9F/"/g; - $s =~ s/\xE2\x80\xA2/*/g; - $s =~ s/\xE2\x80\xA4/./g; - $s =~ s/\xE2\x80\xA5/../g; - $s =~ s/\xE2\x80\xA6/.../g; - return $s; -} - -sub latin_plus_p { - local($this, $s, $control) = @_; - - $control = "" unless defined($control); - return $s =~ /^([\x20-\x7E]|\xC2[\xA1-\xBF]|[\xC3-\xCC][\x80-\xBF]|\xCA[\x80-\xAF]|\xE2[\x80-\xAF][\x80-\xBF])+$/; -} - -sub nth_line_in_file { - local($this, $filename, $n) = @_; - - return "" unless $n =~ /^[1-9]\d*$/; - open(IN, $filename) || return ""; - my $line_no = 0; - while () { - $line_no++; - if ($n == $line_no) { - $_ =~ s/\s+$//; - close(IN); - return $_; - } - } - close(IN); - return ""; -} - -sub read_file { - local($this, $filename) = @_; - - my $file_content = ""; - open(IN, $filename) || return ""; - while () { - $file_content .= $_; - } - close(IN); - return $file_content; -} - -sub cap_list { - local($this, @list) = @_; - - @cap_list = (); 
- foreach $l (@list) { - ($premod, $core) = ($l =~ /^(a|an) (\S.*)$/); - if (defined($premod) && defined($core)) { - push(@cap_list, "$premod \u$core"); - } elsif ($this->cap_member($l, "US")) { - push(@cap_list, uc $l); - } else { - push(@cap_list, "\u$l"); - } - } - return @cap_list; -} - -sub integer_list_with_commas_and_ranges { - local($this, @list) = @_; - - my $in_range_p = 0; - my $last_value = 0; - my $result = ""; - while (@list) { - $elem = shift @list; - if ($elem =~ /^\d+$/) { - if ($in_range_p) { - if ($elem == $last_value + 1) { - $last_value = $elem; - } else { - $result .= "-$last_value, $elem"; - if (@list && ($next = $list[0]) && ($elem =~ /^\d+$/) && ($next =~ /^\d+$/) - && ($next == $elem + 1)) { - $last_value = $elem; - $in_range_p = 1; - } else { - $in_range_p = 0; - } - } - } else { - $result .= ", $elem"; - if (@list && ($next = $list[0]) && ($elem =~ /^\d+$/) && ($next =~ /^\d+$/) - && ($next == $elem + 1)) { - $last_value = $elem; - $in_range_p = 1; - } - } - } else { - if ($in_range_p) { - $result .= "-$last_value, $elem"; - $in_range_p = 0; - } else { - $result .= ", $elem"; - } - } - } - if ($in_range_p) { - $result .= "-$last_value"; - } - $result =~ s/^,\s*//; - return $result; -} - -sub comma_append { - local($this, $a, $b) = @_; - - if (defined($a) && ($a =~ /\S/)) { - if (defined($b) && ($b =~ /\S/)) { - return "$a,$b"; - } else { - return $a; - } - } else { - if (defined($b) && ($b =~ /\S/)) { - return $b; - } else { - return ""; - } - } -} - -sub version { - return "3.17"; -} - -sub print_stderr { - local($this, $message, $verbose) = @_; - - $verbose = 1 unless defined($verbose); - print STDERR $message if $verbose; - return 1; -} - -sub print_log { - local($this, $message, *LOG, $verbose) = @_; - - $verbose = 1 unless defined($verbose); - print LOG $message if $verbose; - return 1; -} - -sub compare_alignment { - local($this, $a, $b, $delimiter) = @_; - - $delimiter = "-" unless $delimiter; - my @a_list = split($delimiter, $a); - my @b_list = split($delimiter, $b); - - while (@a_list && @b_list) { - $a_head = shift @a_list; - $b_head = shift @b_list; - next if $a_head eq $b_head; - return $a_head <=> $b_head if ($a_head =~ /^\d+$/) && ($b_head =~ /^\d+$/); - return $a_head cmp $b_head; - } - return -1 if @a_list; - return 1 if @b_list; - return 0; -} - -sub normalize_string { - # normalize punctuation, full-width characters (to ASCII) - local($this, $s, $control) = @_; - - $control = "" unless defined($control); - - $norm_s = $s; - $norm_s =~ tr/A-Z/a-z/; - - $norm_s =~ s/ \@([-:\/])/ $1/g; # non-initial left @ - $norm_s =~ s/^\@([-:\/])/$1/; # initial left @ - $norm_s =~ s/([-:\/])\@ /$1 /g; # non-initial right @ - $norm_s =~ s/([-:\/])\@$/$1/; # initial right @ - $norm_s =~ s/([\(\)"])([,;.?!])/$1 $2/g; - $norm_s =~ s/\bcannot\b/can not/g; - - $norm_s =~ s/\xC2\xAD/-/g; # soft hyphen - - $norm_s =~ s/\xE2\x80\x94/-/g; # em dash - $norm_s =~ s/\xE2\x80\x95/-/g; # horizontal bar - $norm_s =~ s/\xE2\x80\x98/`/g; # grave accent - $norm_s =~ s/\xE2\x80\x99/'/g; # apostrophe - $norm_s =~ s/\xE2\x80\x9C/"/g; # left double quote mark - $norm_s =~ s/\xE2\x80\x9D/"/g; # right double quote mark - $norm_s =~ s/\xE2\x94\x80/-/g; # box drawings light horizontal - $norm_s =~ s/\xE2\x94\x81/-/g; # box drawings heavy horizontal - $norm_s =~ s/\xE3\x80\x81/,/g; # ideographic comma - $norm_s =~ s/\xE3\x80\x82/./g; # ideographic full stop - $norm_s =~ s/\xE3\x80\x88/"/g; # left angle bracket - $norm_s =~ s/\xE3\x80\x89/"/g; # right angle bracket - $norm_s =~ 
s/\xE3\x80\x8A/"/g; # left double angle bracket - $norm_s =~ s/\xE3\x80\x8B/"/g; # right double angle bracket - $norm_s =~ s/\xE3\x80\x8C/"/g; # left corner bracket - $norm_s =~ s/\xE3\x80\x8D/"/g; # right corner bracket - $norm_s =~ s/\xE3\x80\x8E/"/g; # left white corner bracket - $norm_s =~ s/\xE3\x80\x8F/"/g; # right white corner bracket - $norm_s =~ s/\xE3\x83\xBB/\xC2\xB7/g; # katakana middle dot -> middle dot - $norm_s =~ s/\xEF\xBB\xBF//g; # UTF8 marker - - if ($control =~ /\bzh\b/i) { - # de-tokenize Chinese - unless ($control =~ /\bpreserve-tok\b/) { - while ($norm_s =~ /[\xE0-\xEF][\x80-\xBF][\x80-\xBF] [\xE0-\xEF][\x80-\xBF][\x80-\xBF]/) { - $norm_s =~ s/([\xE0-\xEF][\x80-\xBF][\x80-\xBF]) ([\xE0-\xEF][\x80-\xBF][\x80-\xBF])/$1$2/g; - } - $norm_s =~ s/([\xE0-\xEF][\x80-\xBF][\x80-\xBF]) ([\x21-\x7E])/$1$2/g; - $norm_s =~ s/([\x21-\x7E]) ([\xE0-\xEF][\x80-\xBF][\x80-\xBF])/$1$2/g; - } - - # fullwidth characters - while ($norm_s =~ /\xEF\xBC[\x81-\xBF]/) { - ($pre,$fullwidth,$post) = ($norm_s =~ /^(.*)(\xEF\xBC[\x81-\xBF])(.*)$/); - $fullwidth =~ s/^\xEF\xBC//; - $fullwidth =~ tr/[\x81-\xBF]/[\x21-\x5F]/; - $norm_s = "$pre$fullwidth$post"; - } - while ($norm_s =~ /\xEF\xBD[\x80-\x9E]/) { - ($pre,$fullwidth,$post) = ($norm_s =~ /^(.*)(\xEF\xBD[\x80-\x9E])(.*)$/); - $fullwidth =~ s/^\xEF\xBD//; - $fullwidth =~ tr/[\x80-\x9E]/[\x60-\x7E]/; - $norm_s = "$pre$fullwidth$post"; - } - $norm_s =~ tr/A-Z/a-z/ unless $control =~ /\bpreserve-case\b/; - - unless ($control =~ /\bpreserve-tok\b/) { - while ($norm_s =~ /[\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E] [\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E]/) { - $norm_s =~ s/([\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E]) ([\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E])/$1$2/g; - } - $norm_s =~ s/([\x21-\x7E]) ([\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E])/$1$2/g; - $norm_s =~ s/([\x21-\x2F\x3A-\x40\x5B-\x60\x7B-\x7E]) ([\x21-\x7E])/$1$2/g; - $norm_s =~ s/ (\xC2\xA9|\xC2\xB7|\xC3\x97) /$1/g; # copyright sign, middle dot, multiplication sign - } - } - - if (($control =~ /\bzh\b/i) && ($control =~ /\bnorm-char\b/)) { - $norm_s =~ s/\xE6\x96\xBC/\xE4\xBA\x8E/g; # feng1 (first char. of Chin. "lie low", line 1308) - $norm_s =~ s/\xE6\xAD\xA7/\xE5\xB2\x90/g; # qi2 (second char. of Chin. "difference", line 1623) - $norm_s =~ s/\xE8\x82\xB2/\xE6\xAF\x93/g; # yu4 (second char. of Chin. "sports", line 440) - $norm_s =~ s/\xE8\x91\x97/\xE7\x9D\x80/g; # zhao (second char. of Chin. "prominent", line 4) - $norm_s =~ s/\xE9\x81\x87/\xE8\xBF\x82/g; # yu4 (second char. of Chin. "good luck", line 959) - } - - if ($control =~ /\bspurious-punct\b/) { - $norm_s =~ s/^\s*[-_\." ]+//; - $norm_s =~ s/[-_\." 
]+\s*$//; - $norm_s =~ s/\(\s+end\s+\)\s*$//i; - $norm_s =~ s/^\s*null\s*$//i; - } - - $norm_s =~ s/^\s+//; - $norm_s =~ s/\s+$//; - $norm_s =~ s/\s+/ /g; - - return $norm_s; -} - -sub normalize_extreme_string { - local($this, $s, $control) = @_; - - $control = "" unless defined($control); - - $norm_s = $s; - $norm_s =~ s/\xE2\xA9\xBE/\xE2\x89\xA5/g; # slanted greater than or equal to - - return $norm_s; -} - -sub increase_ht_count { - local($this, *ht, $incr, @path) = @_; - - if ($#path == 0) { - $ht{($path[0])} = ($ht{($path[0])} || 0) + $incr; - } elsif ($#path == 1) { - $ht{($path[0])}->{($path[1])} - = ($ht{($path[0])}->{($path[1])} || 0) + $incr; - } elsif ($#path == 2) { - $ht{($path[0])}->{($path[1])}->{($path[2])} - = ($ht{($path[0])}->{($path[1])}->{($path[2])} || 0) + $incr; - } elsif ($#path == 3) { - $ht{($path[0])}->{($path[1])}->{($path[2])}->{($path[3])} - = ($ht{($path[0])}->{($path[1])}->{($path[2])}->{($path[3])} || 0) + $incr; - } elsif ($#path == 4) { - $ht{($path[0])}->{($path[1])}->{($path[2])}->{($path[3])}->{($path[4])} - = ($ht{($path[0])}->{($path[1])}->{($path[2])}->{($path[3])}->{($path[4])} || 0) + $incr; - } else { - print STDERR "increase_ht_count unsupported for path of length " . ($#path + 1) . "\n"; - } -} - -sub adjust_numbers { - # non-negative integers - local($this, $s, $delta) = @_; - - $result = ""; - while ($s =~ /\d/) { - ($pre,$i,$post) = ($s =~ /^([^0-9]*)(\d+)([^0-9].*|)$/); - $result .= $pre . ($i + $delta); - $s = $post; - } - $result .= $s; - return $result; -} - -sub first_defined { - local($this, @list) = @_; - - foreach $elem (@list) { - return $elem if defined($elem); - } - return ""; -} - -sub first_defined_non_empty { - local($this, @list) = @_; - - foreach $item (@list) { - return $item if defined($item) && ($item ne ""); - } - return ""; -} - -sub elem_after_member_list { - local($this,$elem,@array) = @_; - - my @elem_after_member_list = (); - foreach $i ((0 .. ($#array - 1))) { - push(@elem_after_member_list, $array[$i+1]) if $elem eq $array[$i]; - } - return join(" ", @elem_after_member_list); -} - -sub add_value_to_list { - local($this,$s,$value,$sep) = @_; - - $s = "" unless defined($s); - $sep = "," unless defined($sep); - return ($s =~ /\S/) ? "$s$sep$value" : $value; -} - -sub add_new_value_to_list { - local($this,$s,$value,$sep) = @_; - - $s = "" unless defined($s); - $sep = "," unless defined($sep); - my @values = split(/$sep/, $s); - push(@values, $value) if defined($value) && ! $this->member($value, @values); - - return join($sep, @values); -} - -sub add_new_hash_value_to_list { - local($this,*ht,$key,$value,$sep) = @_; - - $sep = "," unless defined($sep); - my $value_s = $ht{$key}; - if (defined($value_s)) { - my @values = split(/$sep/, $value_s); - push(@values, $value) unless $this->member($value, @values); - $ht{$key} = join($sep, @values); - } else { - $ht{$key} = $value; - } -} - -sub ip_info { - local($this, $ip_address) = @_; - - my %ip_map = (); - $ip_map{"128.9.208.69"} = "Ulf Hermjakob (bach.isi.edu)"; - $ip_map{"128.9.208.169"} = "Ulf Hermjakob (brahms.isi.edu)"; - $ip_map{"128.9.184.148"} = "Ulf Hermjakob (beethoven.isi.edu ?)"; - $ip_map{"128.9.184.162"} = "Ulf Hermjakob (beethoven.isi.edu)"; - $ip_map{"128.9.176.39"} = "Kevin Knight"; - $ip_map{"128.9.184.187"} = "Kevin Knight"; - $ip_map{"128.9.216.56"} = "Kevin Knight"; - $ip_map{"128.9.208.155"} = "cage.isi.edu"; - - return ($ip_name = $ip_map{$ip_address}) ? 
"$ip_address - $ip_name" : $ip_address; -} - -# from standalone de-accent.pl -sub de_accent_string { - local($this, $s) = @_; - - $s =~ tr/A-Z/a-z/; - unless (0) { - # Latin-1 - if ($s =~ /\xC3[\x80-\xBF]/) { - $s =~ s/(À|Á|Â|Ã|Ä|Å)/A/g; - $s =~ s/Æ/Ae/g; - $s =~ s/Ç/C/g; - $s =~ s/Ð/D/g; - $s =~ s/(È|É|Ê|Ë)/E/g; - $s =~ s/(Ì|Í|Î|Ï)/I/g; - $s =~ s/Ñ/N/g; - $s =~ s/(Ò|Ó|Ô|Õ|Ö|Ø)/O/g; - $s =~ s/(Ù|Ú|Û|Ü)/U/g; - $s =~ s/Þ/Th/g; - $s =~ s/Ý/Y/g; - $s =~ s/(à|á|â|ã|ä|å)/a/g; - $s =~ s/æ/ae/g; - $s =~ s/ç/c/g; - $s =~ s/(è|é|ê|ë)/e/g; - $s =~ s/(ì|í|î|ï)/i/g; - $s =~ s/ð/d/g; - $s =~ s/ñ/n/g; - $s =~ s/(ò|ó|ô|õ|ö)/o/g; - $s =~ s/ß/ss/g; - $s =~ s/þ/th/g; - $s =~ s/(ù|ú|û|ü)/u/g; - $s =~ s/(ý|ÿ)/y/g; - } - # Latin Extended-A - if ($s =~ /[\xC4-\xC5][\x80-\xBF]/) { - $s =~ s/(Ā|Ă|Ą)/A/g; - $s =~ s/(ā|ă|ą)/a/g; - $s =~ s/(Ć|Ĉ|Ċ|Č)/C/g; - $s =~ s/(ć|ĉ|ċ|č)/c/g; - $s =~ s/(Ď|Đ)/D/g; - $s =~ s/(ď|đ)/d/g; - $s =~ s/(Ē|Ĕ|Ė|Ę|Ě)/E/g; - $s =~ s/(ē|ĕ|ė|ę|ě)/e/g; - $s =~ s/(Ĝ|Ğ|Ġ|Ģ)/G/g; - $s =~ s/(ĝ|ğ|ġ|ģ)/g/g; - $s =~ s/(Ĥ|Ħ)/H/g; - $s =~ s/(ĥ|ħ)/h/g; - $s =~ s/(Ĩ|Ī|Ĭ|Į|İ)/I/g; - $s =~ s/(ĩ|ī|ĭ|į|ı)/i/g; - $s =~ s/IJ/Ij/g; - $s =~ s/ij/ij/g; - $s =~ s/Ĵ/J/g; - $s =~ s/ĵ/j/g; - $s =~ s/Ķ/K/g; - $s =~ s/(ķ|ĸ)/k/g; - $s =~ s/(Ĺ|Ļ|Ľ|Ŀ|Ł)/L/g; - $s =~ s/(ļ|ľ|ŀ|ł)/l/g; - $s =~ s/(Ń|Ņ|Ň|Ŋ)/N/g; - $s =~ s/(ń|ņ|ň|ʼn|ŋ)/n/g; - $s =~ s/(Ō|Ŏ|Ő)/O/g; - $s =~ s/(ō|ŏ|ő)/o/g; - $s =~ s/Œ/Oe/g; - $s =~ s/œ/oe/g; - $s =~ s/(Ŕ|Ŗ|Ř)/R/g; - $s =~ s/(ŕ|ŗ|ř)/r/g; - $s =~ s/(Ś|Ŝ|Ş|Š)/S/g; - $s =~ s/(ś|ŝ|ş|š|ſ)/s/g; - $s =~ s/(Ţ|Ť|Ŧ)/T/g; - $s =~ s/(ţ|ť|ŧ)/t/g; - $s =~ s/(Ũ|Ū|Ŭ|Ů|Ű|Ų)/U/g; - $s =~ s/(ũ|ū|ŭ|ů|ű|ų)/u/g; - $s =~ s/Ŵ/W/g; - $s =~ s/ŵ/w/g; - $s =~ s/(Ŷ|Ÿ)/Y/g; - $s =~ s/ŷ/y/g; - $s =~ s/(Ź|Ż|Ž)/Z/g; - $s =~ s/(ź|ż|ž)/z/g; - } - # Latin Extended-B - if ($s =~ /[\xC7-\xC7][\x80-\xBF]/) { - $s =~ s/(\xC7\x8D)/A/g; - $s =~ s/(\xC7\x8E)/a/g; - $s =~ s/(\xC7\x8F)/I/g; - $s =~ s/(\xC7\x90)/i/g; - $s =~ s/(\xC7\x91)/O/g; - $s =~ s/(\xC7\x92)/o/g; - $s =~ s/(\xC7\x93)/U/g; - $s =~ s/(\xC7\x94)/u/g; - $s =~ s/(\xC7\x95)/U/g; - $s =~ s/(\xC7\x96)/u/g; - $s =~ s/(\xC7\x97)/U/g; - $s =~ s/(\xC7\x98)/u/g; - $s =~ s/(\xC7\x99)/U/g; - $s =~ s/(\xC7\x9A)/u/g; - $s =~ s/(\xC7\x9B)/U/g; - $s =~ s/(\xC7\x9C)/u/g; - } - # Latin Extended Additional - if ($s =~ /\xE1[\xB8-\xBF][\x80-\xBF]/) { - $s =~ s/(ḁ|ạ|ả|ấ|ầ|ẩ|ẫ|ậ|ắ|ằ|ẳ|ẵ|ặ|ẚ)/a/g; - $s =~ s/(ḃ|ḅ|ḇ)/b/g; - $s =~ s/(ḉ)/c/g; - $s =~ s/(ḋ|ḍ|ḏ|ḑ|ḓ)/d/g; - $s =~ s/(ḕ|ḗ|ḙ|ḛ|ḝ|ẹ|ẻ|ẽ|ế|ề|ể|ễ|ệ)/e/g; - $s =~ s/(ḟ)/f/g; - $s =~ s/(ḡ)/g/g; - $s =~ s/(ḣ|ḥ|ḧ|ḩ|ḫ)/h/g; - $s =~ s/(ḭ|ḯ|ỉ|ị)/i/g; - $s =~ s/(ḱ|ḳ|ḵ)/k/g; - $s =~ s/(ḷ|ḹ|ḻ|ḽ)/l/g; - $s =~ s/(ḿ|ṁ|ṃ)/m/g; - $s =~ s/(ṅ|ṇ|ṉ|ṋ)/m/g; - $s =~ s/(ọ|ỏ|ố|ồ|ổ|ỗ|ộ|ớ|ờ|ở|ỡ|ợ|ṍ|ṏ|ṑ|ṓ)/o/g; - $s =~ s/(ṕ|ṗ)/p/g; - $s =~ s/(ṙ|ṛ|ṝ|ṟ)/r/g; - $s =~ s/(ṡ|ṣ|ṥ|ṧ|ṩ|ẛ)/s/g; - $s =~ s/(ṫ|ṭ|ṯ|ṱ)/t/g; - $s =~ s/(ṳ|ṵ|ṷ|ṹ|ṻ|ụ|ủ|ứ|ừ|ử|ữ|ự)/u/g; - $s =~ s/(ṽ|ṿ)/v/g; - $s =~ s/(ẁ|ẃ|ẅ|ẇ|ẉ|ẘ)/w/g; - $s =~ s/(ẋ|ẍ)/x/g; - $s =~ s/(ẏ|ỳ|ỵ|ỷ|ỹ|ẙ)/y/g; - $s =~ s/(ẑ|ẓ|ẕ)/z/g; - $s =~ s/(Ḁ|Ạ|Ả|Ấ|Ầ|Ẩ|Ẫ|Ậ|Ắ|Ằ|Ẳ|Ẵ|Ặ)/A/g; - $s =~ s/(Ḃ|Ḅ|Ḇ)/B/g; - $s =~ s/(Ḉ)/C/g; - $s =~ s/(Ḋ|Ḍ|Ḏ|Ḑ|Ḓ)/D/g; - $s =~ s/(Ḕ|Ḗ|Ḙ|Ḛ|Ḝ|Ẹ|Ẻ|Ẽ|Ế|Ề|Ể|Ễ|Ệ)/E/g; - $s =~ s/(Ḟ)/F/g; - $s =~ s/(Ḡ)/G/g; - $s =~ s/(Ḣ|Ḥ|Ḧ|Ḩ|Ḫ)/H/g; - $s =~ s/(Ḭ|Ḯ|Ỉ|Ị)/I/g; - $s =~ s/(Ḱ|Ḳ|Ḵ)/K/g; - $s =~ s/(Ḷ|Ḹ|Ḻ|Ḽ)/L/g; - $s =~ s/(Ḿ|Ṁ|Ṃ)/M/g; - $s =~ s/(Ṅ|Ṇ|Ṉ|Ṋ)/N/g; - $s =~ s/(Ṍ|Ṏ|Ṑ|Ṓ|Ọ|Ỏ|Ố|Ồ|Ổ|Ỗ|Ộ|Ớ|Ờ|Ở|Ỡ|Ợ)/O/g; - $s =~ s/(Ṕ|Ṗ)/P/g; - $s =~ s/(Ṙ|Ṛ|Ṝ|Ṟ)/R/g; - $s =~ s/(Ṡ|Ṣ|Ṥ|Ṧ|Ṩ)/S/g; - $s =~ s/(Ṫ|Ṭ|Ṯ|Ṱ)/T/g; - $s =~ s/(Ṳ|Ṵ|Ṷ|Ṹ|Ṻ|Ụ|Ủ|Ứ|Ừ|Ử|Ữ|Ự)/U/g; - $s =~ s/(Ṽ|Ṿ)/V/g; - $s =~ s/(Ẁ|Ẃ|Ẅ|Ẇ|Ẉ)/W/g; - $s =~ s/(Ẍ)/X/g; - $s =~ 
s/(Ẏ|Ỳ|Ỵ|Ỷ|Ỹ)/Y/g; - $s =~ s/(Ẑ|Ẓ|Ẕ)/Z/g; - } - # Greek letters - if ($s =~ /\xCE[\x86-\xAB]/) { - $s =~ s/ά/α/g; - $s =~ s/έ/ε/g; - $s =~ s/ί/ι/g; - $s =~ s/ϊ/ι/g; - $s =~ s/ΐ/ι/g; - $s =~ s/ό/ο/g; - $s =~ s/ύ/υ/g; - $s =~ s/ϋ/υ/g; - $s =~ s/ΰ/υ/g; - $s =~ s/ώ/ω/g; - $s =~ s/Ά/Α/g; - $s =~ s/Έ/Ε/g; - $s =~ s/Ή/Η/g; - $s =~ s/Ί/Ι/g; - $s =~ s/Ϊ/Ι/g; - $s =~ s/Ύ/Υ/g; - $s =~ s/Ϋ/Υ/g; - $s =~ s/Ώ/Ω/g; - } - # Cyrillic letters - if ($s =~ /\xD0[\x80-\xAF]/) { - $s =~ s/Ѐ/Е/g; - $s =~ s/Ё/Е/g; - $s =~ s/Ѓ/Г/g; - $s =~ s/Ќ/К/g; - $s =~ s/Ѝ/И/g; - $s =~ s/Й/И/g; - $s =~ s/ѐ/е/g; - $s =~ s/ё/е/g; - $s =~ s/ѓ/г/g; - $s =~ s/ќ/к/g; - $s =~ s/ѝ/и/g; - $s =~ s/й/и/g; - } - } - return $s; -} - -sub read_de_accent_case_resource { - local($this, $filename, *ht, *LOG, $verbose) = @_; - # e.g. data/char-de-accent-lc.txt - - if (open(IN, $filename)) { - my $mode = "de-accent"; - my $line_number = 0; - my $n_de_accent_targets = 0; - my $n_de_accent_sources = 0; - my $n_case_entries = 0; - while () { - s/^\xEF\xBB\xBF//; - s/\s*$//; - $line_number++; - if ($_ =~ /^#+\s*CASE\b/) { - $mode = "case"; - } elsif ($_ =~ /^#+\s*PUNCTUATION NORMALIZATION\b/) { - $mode = "punctuation-normalization"; - } elsif ($_ =~ /^#/) { - # ignore comment - } elsif ($_ =~ /^\s*$/) { - # ignore empty line - } elsif (($mode eq "de-accent") && (($char_without_accent, @chars_with_accent) = split(/\s+/, $_))) { - if (keys %{$ht{DE_ACCENT_INV}->{$char_without_accent}}) { - print LOG "Ignoring duplicate de-accent line for target $char_without_accent in l.$line_number in $filename\n" unless $char_without_accent eq "--"; - } elsif (@chars_with_accent) { - $n_de_accent_targets++; - foreach $char_with_accent (@chars_with_accent) { - my @prev_target_chars = keys %{$ht{DE_ACCENT}->{$char_with_accent}}; - print LOG "Accent character $char_with_accent has duplicate target $char_without_accent (besides @prev_target_chars) in l.$line_number in $filename\n" if @prev_target_chars && (! ($char_without_accent =~ /^[aou]e$/i)); - $char_without_accent = "" if $char_without_accent eq "--"; - $ht{DE_ACCENT}->{$char_with_accent}->{$char_without_accent} = 1; - $ht{DE_ACCENT1}->{$char_with_accent} = $char_without_accent - if (! 
defined($ht{DE_ACCENT1}->{$char_with_accent})) - && ($char_without_accent =~ /^.[\x80-\xBF]*$/); - $ht{DE_ACCENT_INV}->{$char_without_accent}->{$char_with_accent} = 1; - $ht{UPPER_CASE_OR_ACCENTED}->{$char_with_accent} = 1; - $n_de_accent_sources++; - } - } else { - print LOG "Empty de-accent list for $char_without_accent in l.$line_number in $filename\n"; - } - } elsif (($mode eq "punctuation-normalization") && (($norm_punct, @unnorm_puncts) = split(/\s+/, $_))) { - if (keys %{$ht{NORM_PUNCT_INV}->{$norm_punct}}) { - print LOG "Ignoring duplicate punctuation-normalization line for target $norm_punct in l.$line_number in $filename\n"; - } elsif (@unnorm_puncts) { - foreach $unnorm_punct (@unnorm_puncts) { - my $prev_norm_punct = $ht{NORM_PUNCT}->{$unnorm_punct}; - if ($prev_norm_punct) { - print LOG "Ignoring duplicate punctuation normalization $unnorm_punct -> $norm_punct (besides $prev_norm_punct) in l.$line_number in $filename\n"; - } - $ht{NORM_PUNCT}->{$unnorm_punct} = $norm_punct; - $ht{NORM_PUNCT_INV}->{$norm_punct}->{$unnorm_punct} = 1; - $ht{LC_DE_ACCENT_CHAR_NORM_PUNCT}->{$unnorm_punct} = $norm_punct; - } - } - } elsif (($mode eq "case") && (($uc_char, $lc_char) = ($_ =~ /^(\S+)\s+(\S+)\s*$/))) { - $ht{UPPER_TO_LOWER_CASE}->{$uc_char} = $lc_char; - $ht{LOWER_TO_UPPER_CASE}->{$lc_char} = $uc_char; - $ht{UPPER_CASE_P}->{$uc_char} = 1; - $ht{LOWER_CASE_P}->{$lc_char} = 1; - $ht{UPPER_CASE_OR_ACCENTED}->{$uc_char} = 1; - $n_case_entries++; - } else { - print LOG "Unrecognized l.$line_number in $filename\n"; - } - } - foreach $char (keys %{$ht{UPPER_CASE_OR_ACCENTED}}) { - my $lc_char = $ht{UPPER_TO_LOWER_CASE}->{$char}; - $lc_char = $char unless defined($lc_char); - my @de_accend_char_results = sort keys %{$ht{DE_ACCENT}->{$lc_char}}; - my $new_char = (@de_accend_char_results) ? $de_accend_char_results[0] : $lc_char; - $ht{LC_DE_ACCENT_CHAR}->{$char} = $new_char; - $ht{LC_DE_ACCENT_CHAR_NORM_PUNCT}->{$char} = $new_char; - } - close(IN); - print LOG "Found $n_case_entries case entries, $n_de_accent_sources/$n_de_accent_targets source/target entries in $line_number lines in file $filename\n" if $verbose; - } else { - print LOG "Can't open $filename\n"; - } -} - -sub de_accent_char { - local($this, $char, *ht, $default) = @_; - - @de_accend_char_results = sort keys %{$ht{DE_ACCENT}->{$char}}; - return (@de_accend_char_results) ? @de_accend_char_results : ($default); -} - -sub lower_case_char { - local($this, $char, *ht, $default) = @_; - - return (defined($lc = $ht{UPPER_TO_LOWER_CASE}->{$char})) ? $lc : $default; -} - -sub lower_case_and_de_accent_char { - local($this, $char, *ht) = @_; - - my $lc_char = $this->lower_case_char($char, *ht, $char); - return $this->de_accent_char($lc_char, *ht, $lc_char); -} - -sub lower_case_and_de_accent_string { - local($this, $string, *ht, $control) = @_; - - # $this->stopwatch("start", "lower_case_and_de_accent_string", *ht, *LOG); - my $norm_punct_p = ($control && ($control =~ /norm-punct/i)); - my @chars = $this->split_into_utf8_characters($string); - my $result = ""; - foreach $char (@chars) { - my @lc_de_accented_chars = $this->lower_case_and_de_accent_char($char, *ht); - if ($norm_punct_p - && (! @lc_de_accented_chars)) { - my $norm_punct = $ht{NORM_PUNCT}->{$char}; - @lc_de_accented_chars = ($norm_punct) if $norm_punct; - } - $result .= ((@lc_de_accented_chars) ? 
$lc_de_accented_chars[0] : $char); - } - # $this->stopwatch("end", "lower_case_and_de_accent_string", *ht, *LOG); - return $result; -} - -sub lower_case_and_de_accent_norm_punct { - local($this, $char, *ht) = @_; - - my $new_char = $ht{LC_DE_ACCENT_CHAR_NORM_PUNCT}->{$char}; - return (defined($new_char)) ? $new_char : $char; -} - -sub lower_case_and_de_accent_string2 { - local($this, $string, *ht, $control) = @_; - - my $norm_punct_p = ($control && ($control =~ /norm-punct/i)); - # $this->stopwatch("start", "lower_case_and_de_accent_string2", *ht, *LOG); - my $s = $string; - my $result = ""; - while (($char, $rest) = ($s =~ /^(.[\x80-\xBF]*)(.*)$/)) { - my $new_char = $ht{LC_DE_ACCENT_CHAR}->{$char}; - if (defined($new_char)) { - $result .= $new_char; - } elsif ($norm_punct_p && defined($new_char = $ht{NORM_PUNCT}->{$char})) { - $result .= $new_char; - } else { - $result .= $char; - } - $s = $rest; - } - # $this->stopwatch("end", "lower_case_and_de_accent_string2", *ht, *LOG); - return $result; -} - -sub lower_case_string { - local($this, $string, *ht, $control) = @_; - - my $norm_punct_p = ($control && ($control =~ /norm-punct/i)); - my $s = $string; - my $result = ""; - while (($char, $rest) = ($s =~ /^(.[\x80-\xBF]*)(.*)$/)) { - my $lc_char = $ht{UPPER_TO_LOWER_CASE}->{$char}; - if (defined($lc_char)) { - $result .= $lc_char; - } elsif ($norm_punct_p && defined($new_char = $ht{NORM_PUNCT}->{$char})) { - $result .= $new_char; - } else { - $result .= $char; - } - $s = $rest; - } - return $result; -} - -sub round_to_n_decimal_places { - local($this, $x, $n, $fill_decimals_p) = @_; - - $fill_decimals_p = 0 unless defined($fill_decimals_p); - unless (defined($x)) { - return $x; - } - if (($x =~ /^-?\d+$/) && (! $fill_decimals_p)) { - return $x; - } - $factor = 1; - foreach $i ((1 .. $n)) { - $factor *= 10; - } - my $rounded_number; - if ($x > 0) { - $rounded_number = (int(($factor * $x) + 0.5) / $factor); - } else { - $rounded_number = (int(($factor * $x) - 0.5) / $factor); - } - if ($fill_decimals_p) { - ($period, $decimals) = ($rounded_number =~ /^-?\d+(\.?)(\d*)$/); - $rounded_number .= "." unless $period || ($n == 0); - foreach ((1 .. ($n - length($decimals)))) { - $rounded_number .= 0; - } - } - return $rounded_number; -} - -sub commify { - local($caller,$number) = @_; - - my $text = reverse $number; - $text =~ s/(\d\d\d)(?=\d)(?!\d*\.)/$1,/g; - return scalar reverse $text; -} - -sub add_javascript_functions { - local($caller,@function_names) = @_; - - $add_javascript_function_s = ""; - foreach $function_name (@function_names) { - - if ($function_name eq "highlight_elems") { - $add_javascript_function_s .= " - function highlight_elems(group_id, value) { - if (group_id != '') { - i = 1; - id = group_id + '-' + i; - while ((s = document.getElementById(id)) != null) { - if (! 
s.origColor) { - if (s.style.color) { - s.origColor = s.style.color; - } else { - s.origColor = '#000000'; - } - } - if (value == '1') { - s.style.color = '#0000FF'; - if (s.innerHTML == '-') { - s.style.innerHtml = s.innerHTML; - s.innerHTML = '-   ← here'; - s.style.fontWeight = 900; - } else { - s.style.fontWeight = 'bold'; - } - } else { - s.style.fontWeight = 'normal'; - s.style.color = s.origColor; - if (s.style.innerHtml != null) { - s.innerHTML = s.style.innerHtml; - } - } - i = i + 1; - id = group_id + '-' + i; - } - } - } -"; - } elsif ($function_name eq "set_style_for_ids") { - $add_javascript_function_s .= " - function set_style_for_ids(style,id_list) { - var ids = id_list.split(/\\s+/); - var len = ids.length; - var s; - for (var i=0; i>$filename")) { - print OUT $s; - close(OUT); - $result = "Appended"; - } else { - $result = "Can't append"; - } - } else { - if (open(OUT, ">$filename")) { - print OUT $s; - close(OUT); - $result = "Wrote"; - } else { - $result = "Can't write"; - } - } - chmod($mod, $filename) if defined($mod) && -e $filename; - return $result; -} - -sub square { - local($caller, $x) = @_; - - return $x * $x; -} - -sub mutual_info { - local($caller, $ab_count, $a_count, $b_count, $total_count, $smoothing) = @_; - - $smoothing = 1 unless defined($smoothing); - $ab_count = 0 unless defined($ab_count); - return 0 unless $a_count && $b_count && $total_count; - - my $p_ab = $ab_count / $total_count; - my $p_a = $a_count / $total_count; - my $p_b = $b_count / $total_count; - my $expected_ab = $p_a * $p_b * $total_count; - - return -99 unless $expected_ab || $smoothing; - - return CORE::log(($ab_count + $smoothing) / ($expected_ab + $smoothing)); -} - -sub mutual_info_multi { - local($caller, $multi_count, $total_count, $smoothing, @counts) = @_; - - return 0 unless $total_count; - my $p_indivuals = 1; - foreach $count (@counts) { - return 0 unless $count; - $p_indivuals *= ($count / $total_count); - } - my $expected_multi_count = $p_indivuals * $total_count; - # print STDERR "actual vs. expected multi_count($multi_count, $total_count, $smoothing, @counts) = $multi_count vs. $expected_multi_count\n"; - - return -99 unless $expected_multi_count || $smoothing; - - return CORE::log(($multi_count + $smoothing) / ($expected_multi_count + $smoothing)); -} - -sub precision_recall_fmeasure { - local($caller, $n_gold, $n_test, $n_shared, $pretty_print_p) = @_; - - unless (($n_gold =~ /^[1-9]\d*$/) && ($n_test =~ /^[1-9]\d*$/)) { - $zero = ($pretty_print_p) ? "0%" : 0; - if ($n_gold =~ /^[1-9]\d*$/) { - return ("n/a", $zero, $zero); - } elsif ($n_test =~ /^[1-9]\d*$/) { - return ($zero, "n/a", $zero); - } else { - return ("n/a", "n/a", "n/a"); - } - } - my $precision = $n_shared / $n_test; - my $recall = $n_shared / $n_gold; - my $f_measure = ($precision * $recall * 2) / ($precision + $recall); - - return ($precision, $recall, $f_measure) unless $pretty_print_p; - - my $pretty_precision = $caller->round_to_n_decimal_places(100*$precision, 1) . "%"; - my $pretty_recall = $caller->round_to_n_decimal_places(100*$recall, 1) . "%"; - my $pretty_f_measure = $caller->round_to_n_decimal_places(100*$f_measure, 1) . 
"%"; - - return ($pretty_precision, $pretty_recall, $pretty_f_measure); -} - -sub recapitalize_named_entity { - local($caller, $s) = @_; - - my @comps = (); - foreach $comp (split(/\s+/, $s)) { - if ($comp =~ /^(and|da|for|of|on|the|van|von)$/) { - push(@comps, $comp); - } elsif ($comp =~ /^[a-z]/) { - push(@comps, ucfirst $comp); - } else { - push(@comps, $comp); - } - } - return join(" ", @comps); -} - -sub slot_value_in_double_colon_del_list { - local($this, $s, $slot, $default) = @_; - - $default = "" unless defined($default); - if (($value) = ($s =~ /::$slot\s+(\S.*\S|\S)\s*$/)) { - $value =~ s/\s*::\S.*\s*$//; - return $value; - } else { - return $default; - } -} - -sub synt_in_double_colon_del_list { - local($this, $s) = @_; - - ($value) = ($s =~ /::synt\s+(\S+|\S.*?\S)(?:\s+::.*)?$/); - return (defined($value)) ? $value : ""; -} - -sub form_in_double_colon_del_list { - local($this, $s) = @_; - - ($value) = ($s =~ /::form\s+(\S+|\S.*?\S)(?:\s+::.*)?$/); - return (defined($value)) ? $value : ""; -} - -sub lex_in_double_colon_del_list { - local($this, $s) = @_; - - ($value) = ($s =~ /::lex\s+(\S+|\S.*?\S)(?:\s+::.*)?$/); - return (defined($value)) ? $value : ""; -} - -sub multi_slot_value_in_double_colon_del_list { - # e.g. when there are multiple slot/value pairs in a line, e.g. ::eng ... :eng ... - local($this, $s, $slot) = @_; - - @values = (); - while (($value, $rest) = ($s =~ /::$slot\s+(\S|\S.*?\S)(\s+::\S.*|\s*)$/)) { - push(@values, $value); - $s = $rest; - } - return @values; -} - -sub remove_slot_in_double_colon_del_list { - local($this, $s, $slot) = @_; - - $s =~ s/::$slot(?:|\s+\S|\s+\S.*?\S)(\s+::\S.*|\s*)$/$1/; - $s =~ s/^\s*//; - return $s; -} - -sub extract_split_info_from_split_dir { - local($this, $dir, *ht) = @_; - - my $n_files = 0; - my $n_snt_ids = 0; - if (opendir(DIR, $dir)) { - my @filenames = sort readdir(DIR); - closedir(DIR); - foreach $filename (@filenames) { - next unless $filename =~ /\.txt$/; - my $split_class; - if (($split_class) = ($filename =~ /-(dev|training|test)-/)) { - my $full_filename = "$dir/$filename"; - if (open(IN, $full_filename)) { - my $old_n_snt_ids = $n_snt_ids; - while () { - if (($snt_id) = ($_ =~ /^#\s*::id\s+(\S+)/)) { - if ($old_split_class = $ht{SPLIT_CLASS}->{$snt_id}) { - unless ($old_split_class eq $split_class) { - print STDERR "Conflicting split class for $snt_id: $old_split_class $split_class\n"; - } - } else { - $ht{SPLIT_CLASS}->{$snt_id} = $split_class; - $ht{SPLIT_CLASS_COUNT}->{$split_class} = ($ht{SPLIT_CLASS_COUNT}->{$split_class} || 0) + 1; - $n_snt_ids++; - } - } - } - $n_files++ unless $n_snt_ids == $old_n_snt_ids; - close(IN); - } else { - print STDERR "Can't open file $full_filename"; - } - } else { - print STDERR "Skipping file $filename when extracting split info from $dir\n"; - } - } - print STDERR "Extracted $n_snt_ids split classes from $n_files files.\n"; - } else { - print STDERR "Can't open directory $dir to extract split info.\n"; - } -} - -sub extract_toks_for_split_class_from_dir { - local($this, $dir, *ht, $split_class, $control) = @_; - - $control = "" unless defined($control); - $print_snt_id_p = ($control =~ /\bwith-snt-id\b/); - my $n_files = 0; - my $n_snts = 0; - if (opendir(DIR, $dir)) { - my @filenames = sort readdir(DIR); - closedir(DIR); - foreach $filename (@filenames) { - next unless $filename =~ /^alignment-release-.*\.txt$/; - my $full_filename = "$dir/$filename"; - if (open(IN, $full_filename)) { - my $old_n_snts = $n_snts; - my $snt_id = ""; - while () { - if (($s_value) = ($_ =~ 
/^#\s*::id\s+(\S+)/)) { - $snt_id = $s_value; - $proper_split_class_p - = ($this_split_class = $ht{SPLIT_CLASS}->{$snt_id}) - && ($this_split_class eq $split_class); - } elsif (($tok) = ($_ =~ /^#\s*::tok\s+(\S|\S.*\S)\s*$/)) { - if ($proper_split_class_p) { - print "$snt_id " if $print_snt_id_p; - print "$tok\n"; - $n_snts++; - } - } - } - $n_files++ unless $n_snts == $old_n_snts; - close(IN); - } else { - print STDERR "Can't open file $full_filename"; - } - } - print STDERR "Extracted $n_snts tokenized sentences ($split_class) from $n_files files.\n"; - } else { - print STDERR "Can't open directory $dir to extract tokens.\n"; - } -} - -sub load_relevant_tok_ngram_corpus { - local($this, $filename, *ht, $max_lex_rule_span, $ngram_count_min, $optional_ngram_output_filename) = @_; - - $ngram_count_min = 1 unless $ngram_count_min; - $max_lex_rule_span = 10 unless $max_lex_rule_span; - my $n_ngram_instances = 0; - my $n_ngram_types = 0; - if (open(IN, $filename)) { - while () { - s/\s*$//; - @tokens = split(/\s+/, $_); - foreach $from_token_index ((0 .. $#tokens)) { - foreach $to_token_index (($from_token_index .. ($from_token_index + $max_lex_rule_span -1))) { - last if $to_token_index > $#tokens; - my $ngram = join(" ", @tokens[$from_token_index .. $to_token_index]); - $ht{RELEVANT_NGRAM}->{$ngram} = ($ht{RELEVANT_NGRAM}->{$ngram} || 0) + 1; - } - } - } - close(IN); - if ($optional_ngram_output_filename && open(OUT, ">$optional_ngram_output_filename")) { - foreach $ngram (sort keys %{$ht{RELEVANT_NGRAM}}) { - $count = $ht{RELEVANT_NGRAM}->{$ngram}; - next unless $count >= $ngram_count_min; - print OUT "($count) $ngram\n"; - $n_ngram_types++; - $n_ngram_instances += $count; - } - close(OUT); - print STDERR "Extracted $n_ngram_types ngram types, $n_ngram_instances ngram instances.\n"; - print STDERR "Wrote ngram stats to $optional_ngram_output_filename\n"; - } - } else { - print STDERR "Can't open relevant tok ngram corpus $filename\n"; - } -} - -sub load_relevant_tok_ngrams { - local($this, $filename, *ht) = @_; - - my $n_entries = 0; - if (open(IN, $filename)) { - while () { - s/\s*$//; - if (($count, $ngram) = ($_ =~ /^\((\d+)\)\s+(\S|\S.*\S)\s*$/)) { - $lc_ngram = lc $ngram; - $ht{RELEVANT_NGRAM}->{$lc_ngram} = ($ht{RELEVANT_NGRAM}->{$lc_ngram} || 0) + $count; - $ht{RELEVANT_LC_NGRAM}->{$lc_ngram} = ($ht{RELEVANT_LC_NGRAM}->{$lc_ngram} || 0) + $count; - $n_entries++; - } - } - close(IN); - print STDERR "Read in $n_entries entries from $filename\n"; - } else { - print STDERR "Can't open relevant tok ngrams from $filename\n"; - } -} - -sub snt_id_sort_function { - local($this, $a, $b) = @_; - - if ((($core_a, $index_a) = ($a =~ /^(\S+)\.(\d+)$/)) - && (($core_b, $index_b) = ($b =~ /^(\S+)\.(\d+)$/))) { - return ($core_a cmp $core_b) || ($index_a <=> $index_b); - } else { - return $a cmp $b; - } -} - -sub count_value_sort_function { - local($this, $a_count, $b_count, $a_value, $b_value, $control) = @_; - - # normalize fractions such as "1/2" - if ($a_count > $b_count) { - return ($control eq "decreasing") ? -1 : 1; - } elsif ($b_count > $a_count) { - return ($control eq "decreasing") ? 
1 : -1; - } - $a_value = $num / $den if ($num, $den) = ($a_value =~ /^([1-9]\d*)\/([1-9]\d*)$/); - $b_value = $num / $den if ($num, $den) = ($b_value =~ /^([1-9]\d*)\/([1-9]\d*)$/); - $a_value =~ s/:/\./ if $a_value =~ /^\d+:\d+$/; - $b_value =~ s/:/\./ if $b_value =~ /^\d+:\d+$/; - if (($a_value =~ /^-?\d+(\.\d+)?$/) - && ($b_value =~ /^-?\d+(\.\d+)?$/)) { - return $a_value <=> $b_value; - } elsif ($a_value =~ /^-?\d+(\.\d+)?$/) { - return 1; - } elsif ($b_value =~ /^-?\d+(\.\d+)?$/) { - return -1; - } else { - return $a_value cmp $b_value; - } -} - -sub undef_to_blank { - local($this, $x) = @_; - - return (defined($x)) ? $x : ""; -} - -sub en_lex_amr_list { - local($this, $s) = @_; - - $bpe = qr{ \( (?: (?> [^()]+ ) | (??{ $bpe }))* \) }x; # see Perl Cookbook 2nd ed. p. 218 - @en_lex_amr_list = (); - my $amr_s; - my $lex; - my $test; - while ($s =~ /\S/) { - $s =~ s/^\s*//; - if (($s =~ /^\([a-z]\d* .*\)/) - && (($amr_s, $rest) = ($s =~ /^($bpe)(\s.*|)$/))) { - push(@en_lex_amr_list, $amr_s); - $s = $rest; - } elsif (($lex, $rest) = ($s =~ /^\s*(\S+)(\s.*|)$/)) { - push(@en_lex_amr_list, $lex); - $s = $rest; - } else { - print STDERR "en_lex_amr_list can't process: $s\n"; - $s = ""; - } - } - return @en_lex_amr_list; -} - -sub make_sure_dir_exists { - local($this, $dir, $umask) = @_; - - mkdir($dir, $umask) unless -d $dir; - chmod($umask, $dir); -} - -sub pretty_percentage { - local($this, $numerator, $denominator) = @_; - - return ($denominator == 0) ? "n/a" : ($this->round_to_n_decimal_places(100*$numerator/$denominator, 2) . "%"); -} - -sub html_color_nth_line { - local($this, $s, $n, $color, $delimiter) = @_; - - $delimiter = "
" unless defined($delimiter); - @lines = split($delimiter, $s); - $lines[$n] = "" . $lines[$n] . "" if ($n =~ /^\d+$/) && ($n <= $#lines); - return join($delimiter, @lines); -} - -sub likely_valid_url_format { - local($this, $url) = @_; - - $url = lc $url; - return 0 if $url =~ /\s/; - return 0 if $url =~ /[@]/; - return 1 if $url =~ /^https?:\/\/.+\.[a-z]+(\?.+)?$/; - return 1 if $url =~ /[a-z].+\.(com|edu|gov|net|org)$/; - return 0; -} - -# see also EnglMorph->special_token_type -$common_file_suffixes = "aspx?|bmp|cgi|docx?|gif|html?|jpeg|jpg|mp3|mp4|pdf|php|png|pptx?|stm|svg|txt|xml"; -$common_top_domain_suffixes = "museum|info|cat|com|edu|gov|int|mil|net|org|ar|at|au|be|bg|bi|br|ca|ch|cn|co|cz|de|dk|es|eu|fi|fr|gr|hk|hu|id|ie|il|in|ir|is|it|jp|ke|kr|lu|mg|mx|my|nl|no|nz|ph|pl|pt|ro|rs|ru|rw|se|sg|sk|so|tr|tv|tw|tz|ua|ug|uk|us|za"; - -sub token_is_url_p { - local($this, $token) = @_; - - return 1 if $token =~ /^www(\.[a-z0-9]([-a-z0-9_]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+)+\.([a-z]{2,2}|$common_top_domain_suffixes)(\/(\.{1,3}|[a-z0-9]([-a-z0-9_%]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+))*(\/[a-z0-9_][-a-z0-9_]+\.($common_file_suffixes))?$/i; - return 1 if $token =~ /^https?:\/\/([a-z]\.)?([a-z0-9]([-a-z0-9_]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+\.)+[a-z]{2,}(\/(\.{1,3}|([-a-z0-9_%]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+))*(\/[a-z_][-a-z0-9_]+\.($common_file_suffixes))?$/i; - return 1 if $token =~ /^[a-z][-a-z0-9_]+(\.[a-z][-a-z0-9_]+)*\.($common_top_domain_suffixes)(\/[a-z0-9]([-a-z0-9_%]|\xC3[\x80-\x96\x98-\xB6\xB8-\xBF])+)*(\/[a-z][-a-z0-9_]+\.($common_file_suffixes))?$/i; - return 0; -} - -sub token_is_email_p { - local($this, $token) = @_; - - return ($token =~ /^[a-z][-a-z0-9_]+(\.[a-z][-a-z0-9_]+)*\@[a-z][-a-z0-9_]+(\.[a-z][-a-z0-9_]+)*\.($common_top_domain_suffixes)$/i); -} - -sub token_is_filename_p { - local($this, $token) = @_; - - return 1 if $token =~ /\.($common_file_suffixes)$/; - return 0; -} - -sub token_is_xml_token_p { - local($this, $token) = @_; - - return ($token =~ /^&(amp|apos|gt|lt|nbsp|quot|&#\d+|&#x[0-9A-F]+);$/i); -} - -sub token_is_handle_p { - local($this, $token) = @_; - - return ($token =~ /^\@[a-z][_a-z0-9]*[a-z0-9]$/i); -} - -sub min { - local($this, @list) = @_; - - my $min = ""; - foreach $item (@list) { - $min = $item if ($item =~ /^-?\d+(?:\.\d*)?$/) && (($min eq "") || ($item < $min)); - } - return $min; -} - -sub max { - local($this, @list) = @_; - - my $max = ""; - foreach $item (@list) { - $max = $item if defined($item) && ($item =~ /^-?\d+(?:\.\d*)?(e[-+]\d+)?$/) && (($max eq "") || ($item > $max)); - } - return $max; -} - -sub split_tok_s_into_tokens { - local($this, $tok_s) = @_; - - @token_list = (); - while (($pre, $link_token, $post) = ($tok_s =~ /^(.*?)\s*(\@?<[^<>]+>\@?)\s*(.*)$/)) { - # generate dummy token for leading blank(s) - if (($tok_s =~ /^\s/) && ($pre eq "") && ($#token_list < 0)) { - push(@token_list, ""); - } else { - push(@token_list, split(/\s+/, $pre)); - } - push(@token_list, $link_token); - $tok_s = $post; - } - push(@token_list, split(/\s+/, $tok_s)); - return @token_list; -} - -sub shuffle { - local($this, @list) = @_; - - @shuffle_list = (); - while (@list) { - $len = $#list + 1; - $rand_position = int(rand($len)); - push(@shuffle_list, $list[$rand_position]); - splice(@list, $rand_position, 1); - } - $s = join(" ", @shuffle_list); - return @shuffle_list; -} - -sub timestamp_to_seconds { - local($this, $timestamp) = @_; - - my $epochtime; - if (($year, $month, $day, $hour, $minute, $second) = ($timestamp =~ 
/^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)$/)) { - $epochtime = timelocal($second, $minute, $hour, $day, $month-1, $year); - } elsif (($year, $month, $day) = ($timestamp =~ /^(\d\d\d\d)-(\d\d)-(\d\d)$/)) { - $epochtime = timelocal(0, 0, 0, $day, $month-1, $year); - } elsif (($year, $month, $day, $hour, $minute, $second, $second_fraction) = ($timestamp =~ /^(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)\.(\d+)$/)) { - $epochtime = timelocal($second, $minute, $hour, $day, $month-1, $year) + ($second_fraction / (10 ** length($second_fraction))); - } else { - $epochtime = 0; - } - return $epochtime; -} - -sub timestamp_diff_in_seconds { - local($this, $timestamp1, $timestamp2) = @_; - - my $epochtime1 = $this->timestamp_to_seconds($timestamp1); - my $epochtime2 = $this->timestamp_to_seconds($timestamp2); - return $epochtime2 - $epochtime1; -} - -sub dirhash { - # maps string to hash of length 4 with characters [a-z2-8] (shorter acc. to $len) - local($this, $s, $len) = @_; - - $hash = 9999; - $mega = 2 ** 20; - $mega1 = $mega - 1; - $giga = 2 ** 26; - foreach $c (split //, $s) { - $hash = $hash*33 + ord($c); - $hash = ($hash >> 20) ^ ($hash & $mega1) if $hash >= $giga; - } - while ($hash >= $mega) { - $hash = ($hash >> 20) ^ ($hash & $mega1); - } - $result = ""; - while ($hash) { - $c = $hash & 31; - $result .= CORE::chr($c + (($c >= 26) ? 24 : 97)); - $hash = $hash >> 5; - } - while (length($result) < 4) { - $result .= "8"; - } - return substr($result, 0, $len) if $len; - return $result; -} - -sub full_path_python { - - foreach $bin_path (split(":", "/usr/sbin:/usr/bin:/bin:/usr/local/bin")) { - return $python if -x ($python = "$bin_path/python"); - } - return "python"; -} - -sub string_contains_unbalanced_paras { - local($this, $s) = @_; - - return 0 unless $s =~ /[(){}\[\]]/; - $rest = $s; - while (($pre,$left,$right,$post) = ($rest =~ /^(.*)([({\[]).*?([\]})])(.*)$/)) { - return 1 unless (($left eq "(") && ($right eq ")")) - || (($left eq "[") && ($right eq "]")) - || (($left eq "{") && ($right eq "}")); - $rest = "$pre$post"; - } - return 1 if $rest =~ /[(){}\[\]]/; - return 0; -} - -sub dequote_string { - local($this, $s) = @_; - - if ($s =~ /^".*"$/) { - $s = substr($s, 1, -1); - $s =~ s/\\"/"/g; - return $s; - } elsif ($s =~ /^'.*'$/) { - $s = substr($s, 1, -1); - $s =~ s/\\'/'/g; - return $s; - } else { - return $s; - } -} - -sub defined_non_space { - local($this, $s) = @_; - - return (defined($s) && ($s =~ /\S/)); -} - -sub default_if_undefined { - local($this, $s, $default) = @_; - - return (defined($s) ? $s : $default); -} - -sub remove_empties { - local($this, @list) = @_; - - @filtered_list = (); - foreach $elem (@list) { - push(@filtered_list, $elem) if defined($elem) && (! ($elem =~ /^\s*$/)) && (! $this->member($elem, @filtered_list)); - } - - return @filtered_list; -} - -# copied from AMRexp.pm -sub new_var_for_surf_amr { - local($this, $amr_s, $s) = @_; - - my $letter = ($s =~ /^[a-z]/i) ? 
lc substr($s, 0, 1) : "x"; - return $letter unless ($amr_s =~ /:\S+\s+\($letter\s+\//) - || ($amr_s =~ /\s\($letter\s+\//) - || ($amr_s =~ /^\s*\($letter\s+\//); # ))) - my $i = 2; - while (($amr_s =~ /:\S+\s+\($letter$i\s+\//) - || ($amr_s =~ /\s+\($letter$i\s+\//) - || ($amr_s =~ /^\s*\($letter$i\s+\//)) { # ))) - $i++; - } - return "$letter$i"; -} - -# copied from AMRexp.pm -sub new_vars_for_surf_amr { - local($this, $amr_s, $ref_amr_s) = @_; - - my $new_amr_s = ""; - my %new_var_ht = (); - my $remaining_amr_s = $amr_s; - my $pre; my $var; my $concept; my $post; - while (($pre, $var, $concept, $post) = ($remaining_amr_s =~ /^(.*?\()([a-z]\d*)\s+\/\s+([^ ()\s]+)(.*)$/s)) { - $new_var = $this->new_var_for_surf_amr("$ref_amr_s $new_amr_s", $concept); - $new_var_ht{$var} = $new_var; - $new_amr_s .= "$pre$new_var / $concept"; - $remaining_amr_s = $post; - } - $new_amr_s .= $remaining_amr_s; - - # also update any reentrancy variables - $remaining_amr_s = $new_amr_s; - $new_amr_s2 = ""; - while (($pre, $var, $post) = ($remaining_amr_s =~ /^(.*?:\S+\s+)([a-z]\d*)([ ()\s].*)$/s)) { - $new_var = $new_var_ht{$var} || $var; - $new_amr_s2 .= "$pre$new_var"; - $remaining_amr_s = $post; - } - $new_amr_s2 .= $remaining_amr_s; - - return $new_amr_s2; -} - -sub update_inner_span_for_id { - local($this, $html_line, $slot, $new_value) = @_; - # e.g. slot: workset-language-name value: Uyghur - - if (defined($new_value) - && (($pre, $old_value, $post) = ($html_line =~ /^(.*]* id="$slot"[^<>]*>)([^<>]*)(<\/span\b[^<>]*>.*)$/i)) - && ($old_value ne $new_value)) { - # print STDERR "Inserting new $slot $old_value -> $new_value\n"; - return $pre . $new_value . $post . "\n"; - } else { - # no change - return $html_line; - } -} - -sub levenshtein_distance { - local($this, $s1, $s2) = @_; - - my $i; - my $j; - my @distance; - my @s1_chars = $utf8->split_into_utf8_characters($s1, "return only chars", *empty_ht); - my $s1_length = $#s1_chars + 1; - my @s2_chars = $utf8->split_into_utf8_characters($s2, "return only chars", *empty_ht); - my $s2_length = $#s2_chars + 1; - for ($i = 0; $i <= $s1_length; $i++) { - $distance[$i][0] = $i; - } - for ($j = 1; $j <= $s2_length; $j++) { - $distance[0][$j] = $j; - } - for ($j = 1; $j <= $s2_length; $j++) { - for ($i = 1; $i <= $s1_length; $i++) { - my $substitution_cost = ($s1_chars[$i-1] eq $s2_chars[$j-1]) ? 
0 : 1; - $distance[$i][$j] = $this->min($distance[$i-1][$j] + 1, - $distance[$i][$j-1] + 1, - $distance[$i-1][$j-1] + $substitution_cost); - # print STDERR "SC($i,$j) = $substitution_cost\n"; - # $d = $distance[$i][$j]; - # print STDERR "D($i,$j) = $d\n"; - } - } - return $distance[$s1_length][$s2_length]; -} - -sub markup_parts_of_string_in_common_with_ref { - local($this, $s, $ref, $start_markup, $end_markup, $deletion_markup, $verbose) = @_; - - # \x01 temporary start-markup - # \x02 temporary end-markup - # \x03 temporary deletion-markup - $s =~ s/[\x01-\x03]//g; - $ref =~ s/[\x01-\x03]//g; - my $i; - my $j; - my @distance; - my @s_chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht); - my $s_length = $#s_chars + 1; - my @ref_chars = $utf8->split_into_utf8_characters($ref, "return only chars", *empty_ht); - my $ref_length = $#ref_chars + 1; - $distance[0][0] = 0; - $del_ins_subst_op[0][0] = "-"; - for ($i = 1; $i <= $s_length; $i++) { - $distance[$i][0] = $i; - $del_ins_subst_op[$i][0] = 0; - } - for ($j = 1; $j <= $ref_length; $j++) { - $distance[0][$j] = $j; - $del_ins_subst_op[0][$j] = 1; - } - for ($j = 1; $j <= $ref_length; $j++) { - for ($i = 1; $i <= $s_length; $i++) { - my $substitution_cost = (($s_chars[$i-1] eq $ref_chars[$j-1])) ? 0 : 1; - my @del_ins_subst_list = ($distance[$i-1][$j] + 1, - $distance[$i][$j-1] + 1, - $distance[$i-1][$j-1] + $substitution_cost); - my $min = $this->min(@del_ins_subst_list); - my $del_ins_subst_position = $this->position($min, @del_ins_subst_list); - $distance[$i][$j] = $min; - $del_ins_subst_op[$i][$j] = $del_ins_subst_position; - } - } - $d = $distance[$s_length][$ref_length]; - print STDERR "markup_parts_of_string_in_common_with_ref LD($s,$ref) = $d\n" if $verbose; - for ($j = 0; $j <= $ref_length; $j++) { - for ($i = 0; $i <= $s_length; $i++) { - $d = $distance[$i][$j]; - $op = $del_ins_subst_op[$i][$j]; - print STDERR "$d($op) " if $verbose; - } - print STDERR "\n" if $verbose; - } - my $result = ""; - my $i_end = $s_length; - my $j_end = $ref_length; - my $cost = $distance[$i_end][$j_end]; - $i = $i_end; - $j = $j_end; - while (1) { - $result2 = $result; - $result2 =~ s/\x01/$start_markup/g; - $result2 =~ s/\x02/$end_markup/g; - $result2 =~ s/\x03/$deletion_markup/g; - print STDERR "i:$i i-end:$i_end j:$j j-end:$j_end r: $result2\n" if $verbose; - # matching characters - if ($i && $j && ($del_ins_subst_op[$i][$j] == 2) && ($distance[$i-1][$j-1] == $distance[$i][$j])) { - $i--; - $j--; - } else { - # previously matching characters - if (($i < $i_end) && ($j < $j_end)) { - my $sub_s = join("", @s_chars[$i .. $i_end-1]); - $result = "\x01" . $sub_s . "\x02" . $result; - } - # character substitution - if ($i && $j && ($del_ins_subst_op[$i][$j] == 2)) { - $i--; - $j--; - $result = $s_chars[$i] . $result; - } elsif ($i && ($del_ins_subst_op[$i][$j] == 0)) { - $i--; - $result = $s_chars[$i] . $result; - } elsif ($j && ($del_ins_subst_op[$i][$j] == 1)) { - $j--; - $result = "\x03" . 
$result; - } else { - last; - } - $i_end = $i; - $j_end = $j; - } - } - $result2 = $result; - $result2 =~ s/\x01/$start_markup/g; - $result2 =~ s/\x02/$end_markup/g; - $result2 =~ s/\x03/$deletion_markup/g; - print STDERR "i:$i i-end:$i_end j:$j j-end:$j_end r: $result2 *\n" if $verbose; - $result =~ s/(\x02)\x03+(\x01)/$1$deletion_markup$2/g; - $result =~ s/(\x02)\x03+$/$1$deletion_markup/g; - $result =~ s/^\x03+(\x01)/$deletion_markup$1/g; - $result =~ s/\x03//g; - $result =~ s/\x01/$start_markup/g; - $result =~ s/\x02/$end_markup/g; - return $result; -} - -sub env_https { - my $https = $ENV{'HTTPS'}; - return 1 if $https && ($https eq "on"); - - my $http_via = $ENV{'HTTP_VIA'}; - return 1 if $http_via && ($http_via =~ /\bHTTPS\b.* \d+(?:\.\d+){3,}:443\b/); # tmp for beta.isi.edu - - return 0; -} - -sub env_http_host { - return $ENV{'HTTP_HOST'} || ""; -} - -sub env_script_filename { - return $ENV{'SCRIPT_FILENAME'} || ""; -} - -sub cgi_mt_app_root_dir { - local($this, $target) = @_; - my $s; - if ($target =~ /filename/i) { - $s = $ENV{'SCRIPT_FILENAME'} || ""; - } else { - $s = $ENV{'SCRIPT_NAME'} || ""; - } - return "" unless $s; - return $d if ($d) = ($s =~ /^(.*?\/(?:amr-editor|chinese-room-editor|utools|romanizer\/version\/[-.a-z0-9]+|romanizer))\//); - return $d if ($d) = ($s =~ /^(.*)\/(?:bin|src|scripts?)\/[^\/]*$/); - return $d if ($d) = ($s =~ /^(.*)\/[^\/]*$/); - return ""; -} - -sub parent_dir { - local($this, $dir) = @_; - - $dir =~ s/\/[^\/]+\/?$//; - return $dir || "/"; -} - -sub span_start { - local($this, $span, $default) = @_; - - $default = "" unless defined($default); - return (($start) = ($span =~ /^(\d+)-\d+$/)) ? $start : $default; -} - -sub span_end { - local($this, $span, $default) = @_; - - $default = "" unless defined($default); - return (($end) = ($span =~ /^\d+-(\d+)$/)) ? $end : $default; -} - -sub oct_mode { - local($this, $filename) = @_; - - @stat = stat($filename); - return "" unless @stat; - $mode = $stat[2]; - $oct_mode = sprintf("%04o", $mode & 07777); - return $oct_mode; -} - -sub csv_to_list { - local($this, $s, $control_string) = @_; - # Allow quoted string such as "Wait\, what?" as element with escaped comma inside. 
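# Added commentary -- an illustrative usage sketch, not part of the original
# module; it assumes the utilities object is available as $util:
#   my @fields = $util->csv_to_list('a,"Wait\, what?",b');
#   # -> ('a', 'Wait\, what?', 'b')   quoted elements keep their escaped commas
#   my @clean  = $util->csv_to_list(' x , y ,, z ', 'strip no-empty');
#   # -> ('x', 'y', 'z')              'strip' trims whitespace, 'no-empty' drops empty fields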
- - $control_string = "" unless defined($control_string); - $strip_p = ($control_string =~ /\bstrip\b/); - $allow_simple_commas_in_quote = ($control_string =~ /\bsimple-comma-ok\b/); - $ignore_empty_elem_p = ($control_string =~ /\bno-empty\b/); - @cvs_list = (); - while ($s ne "") { - if ((($elem, $rest) = ($s =~ /^"((?:\\[,\"]|[^,\"][\x80-\xBF]*)*)"(,.*|)$/)) - || ($allow_simple_commas_in_quote - && (($elem, $rest) = ($s =~ /^"((?:\\[,\"]|[^\"][\x80-\xBF]*)*)"(,.*|)$/))) - || (($elem, $rest) = ($s =~ /^([^,]*)(,.*|\s*)$/)) - || (($elem, $rest) = ($s =~ /^(.*)()$/))) { - if ($strip_p) { - $elem =~ s/^\s*//; - $elem =~ s/\s*$//; - } - push(@cvs_list, $elem) unless $ignore_empty_elem_p && ($elem eq ""); - $rest =~ s/^,//; - $s = $rest; - } else { - print STDERR "Error in csv_to_list processing $s\n"; - last; - } - } - return @cvs_list; -} - -sub kl_divergence { - local($this, $distribution_id, $gold_distribution_id, *ht, $smoothing) = @_; - - my $total_count = $ht{DISTRIBUTION_TOTAL_COUNT}->{$distribution_id}; - my $total_gold_count = $ht{DISTRIBUTION_TOTAL_COUNT}->{$gold_distribution_id}; - return unless $total_count && $total_gold_count; - - my @values = keys %{$ht{DISTRIBUTION_VALUE_COUNT}->{$gold_distribution_id}}; - my $n_values = $#values + 1; - - my $min_total_count = $this->min($total_count, $total_gold_count); - $smoothing = 1 - (10000/((100+$min_total_count)**2)) unless defined($smoothing); - return unless $smoothing; - my $smoothed_n_values = $smoothing * $n_values; - my $divergence = 0; - foreach $value (@values) { - my $count = $ht{DISTRIBUTION_VALUE_COUNT}->{$distribution_id}->{$value} || 0; - my $gold_count = $ht{DISTRIBUTION_VALUE_COUNT}->{$gold_distribution_id}->{$value}; - my $p = ($count + $smoothing) / ($total_count + $smoothed_n_values); - my $q = ($gold_count + $smoothing) / ($total_gold_count + $smoothed_n_values); - if ($p == 0) { - # no impact on divergence - } elsif ($q) { - my $incr = $p * CORE::log($p/$q); - $divergence += $incr; - my $incr2 = $this->round_to_n_decimal_places($incr, 5); - my $p2 = $this->round_to_n_decimal_places($p, 5); - my $q2 = $this->round_to_n_decimal_places($q, 5); - $incr2 = "+" . $incr2 if $incr > 0; - $log = " value: $value count: $count gold_count: $gold_count p: $p2 q: $q2 $incr2\n"; - $ht{KL_DIVERGENCE_LOG}->{$distribution_id}->{$gold_distribution_id}->{$value} = $log; - $ht{KL_DIVERGENCE_INCR}->{$distribution_id}->{$gold_distribution_id}->{$value} = $incr; - } else { - $divergence += 999; - } - } - return $divergence; -} - -sub read_ISO_8859_named_entities { - local($this, *ht, $filename, $verbose) = @_; - # e.g. from /nfs/isd/ulf/arabic/data/ISO-8859-1-HTML-named-entities.txt - # - # - # - # - # - # - - my $n = 0; - if (open(IN, $filename)) { - while () { - s/^\xEF\xBB\xBF//; - if (($name, $dec_unicode) = ($_ =~ /^{$name} = $dec_unicode; - $ht{HTML_ENTITY_DECUNICODE_TO_NAME}->{$dec_unicode} = $name; - $ht{HTML_ENTITY_NAME_TO_UTF8}->{$name} = $utf8->unicode2string($dec_unicode); - $n++; - # print STDERR "read_ISO_8859_named_entities $name $dec_unicode .\n" if $name =~ /dash/; - } - } - close(IN); - print STDERR "Loaded $n entries from $filename\n" if $verbose; - } else { - print STDERR "Could not open $filename\n" if $verbose; - } -} - -sub neg { - local($this, $x) = @_; - - # robust - return (defined($x) && ($x =~ /^-?\d+(?:\.\d+)?$/)) ? (- $x) : $x; -} - -sub read_ttable_gloss_data { - local($this, $filename, $lang_code, *ht, $direction) = @_; - # e.g. 
/nfs/isd/ulf/croom/oov-lanpairs/som-eng/som-eng-ttable-glosses.txt - - $direction = "f to e" unless defined($direction); - if (open(IN, $filename)) { - while () { - if (($headword, $gloss) = ($_ =~ /^(.*?)\t(.*?)\s*$/)) { - if ($direction eq "e to f") { - $ht{TTABLE_E_GLOSS}->{$lang_code}->{$headword} = $gloss; - } else { - $ht{TTABLE_F_GLOSS}->{$lang_code}->{$headword} = $gloss; - } - } - } - close(IN); - } -} - -sub format_gloss_for_tooltop { - local($this, $gloss) = @_; - - $gloss =~ s/^\s*/\t/; - $gloss =~ s/\s*$//; - $gloss =~ s/ / /g; - $gloss =~ s/\t/ /g; - return $gloss; -} - -sub obsolete_tooltip { - local($this, $s, $lang_code, *ht) = @_; - - return $gloss if defined($gloss = $ht{TTABLE_F_GLOSS}->{$lang_code}->{$s}); - @e_s = sort { $ht{T_TABLE_F_E_C}->{$lang_code}->{$s}->{$b} - <=> $ht{T_TABLE_F_E_C}->{$lang_code}->{$s}->{$a} } - keys %{$ht{T_TABLE_F_E_C}->{$lang_code}->{$s}}; - if (@e_s) { - $e = shift @e_s; - $count = $ht{T_TABLE_F_E_C}->{$lang_code}->{$s}->{$e}; - $min_count = $this->max($count * 0.01, 1.0); - $count =~ s/(\.\d\d)\d*$/$1/; - $result = "$s: $e ($count)"; - $n = 1; - while (@e_s) { - $e = shift @e_s; - $count = $ht{T_TABLE_F_E_C}->{$lang_code}->{$s}->{$e}; - last if $count < $min_count; - $count =~ s/(\.\d\d)\d*$/$1/; - $result .= " $e ($count)"; - $n++; - last if $n >= 10; - } - $ht{TTABLE_F_GLOSS}->{$lang_code}->{$s} = $result; - return $result; - } else { - return ""; - } -} - -sub markup_html_line_init { - local($this, $s, *ht, $id) = @_; - - my @chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht); - $ht{S}->{$id} = $s; -} - -sub markup_html_line_regex { - local($this, $id, *ht, $regex, $m_slot, $m_value, *LOG) = @_; - - unless ($regex eq "") { - my $s = $ht{S}->{$id}; - my $current_pos = 0; - while (($pre, $match_s, $post) = ($s =~ /^(.*?)($regex)(.*)$/)) { - $current_pos += $utf8->length_in_utf8_chars($pre); - my $match_len = $utf8->length_in_utf8_chars($match_s); - $ht{START}->{$id}->{$current_pos}->{$m_slot}->{$m_value} = 1; - $ht{STOP}->{$id}->{($current_pos+$match_len)}->{$m_slot}->{$m_value} = 1; - $current_pos += $match_len; - $s = $post; - } - } -} - -sub html_markup_line { - local($this, $id, *ht, *LOG) = @_; - - my @titles = (); - my @colors = (); - my @text_decorations = (); - - my $s = $ht{S}->{$id}; - # print LOG "html_markup_line $id: $s\n"; - my @chars = $utf8->split_into_utf8_characters($s, "return only chars", *empty_ht); - my $markedup_s = ""; - - my $new_title = ""; - my $new_color = ""; - my $new_text_decoration = ""; - my $n_spans = 0; - my $i; - foreach $i ((0 .. 
($#chars+1))) { - my $stop_span_p = 0; - foreach $m_slot (keys %{$ht{STOP}->{$id}->{$i}}) { - foreach $m_value (keys %{$ht{STOP}->{$id}->{$i}->{$m_slot}}) { - if ($m_slot eq "title") { - my $last_positition = $this->last_position($m_value, @titles); - splice(@titles, $last_positition, 1) if $last_positition >= 0; - $stop_span_p = 1; - } elsif ($m_slot eq "color") { - my $last_positition = $this->last_position($m_value, @colors); - splice(@colors, $last_positition, 1) if $last_positition >= 0; - $stop_span_p = 1; - } elsif ($m_slot eq "text-decoration") { - my $last_positition = $this->last_position($m_value, @text_decorations); - splice(@text_decorations, $last_positition, 1) if $last_positition >= 0; - $stop_span_p = 1; - } - } - } - if ($stop_span_p) { - $markedup_s .= ""; - $n_spans--; - } - my $start_span_p = 0; - foreach $m_slot (keys %{$ht{START}->{$id}->{$i}}) { - foreach $m_value (keys %{$ht{START}->{$id}->{$i}->{$m_slot}}) { - if ($m_slot eq "title") { - push(@titles, $m_value); - $start_span_p = 1; - } elsif ($m_slot eq "color") { - push(@colors, $m_value); - $start_span_p = 1; - } elsif ($m_slot eq "text-decoration") { - push(@text_decorations, $m_value); - $start_span_p = 1; - } - } - } - if ($stop_span_p || $start_span_p) { - my $new_title = (@titles) ? $titles[$#titles] : ""; - my $new_color = (@colors) ? $colors[$#colors] : ""; - my $new_text_decoration = (@text_decorations) ? $text_decorations[$#text_decorations] : ""; - if ($new_title || $new_color || $new_text_decoration) { - my $args = ""; - if ($new_title) { - $g_title = $this->guard_html_quote($new_title); - $args .= " title=\"$g_title\""; - } - if ($new_color || $new_text_decoration) { - $g_color = $this->guard_html_quote($new_color); - $g_text_decoration = $this->guard_html_quote($new_text_decoration); - $color_clause = ($new_color) ? "color:$g_color;" : ""; - $text_decoration_clause = ($new_text_decoration) ? "text-decoration:$g_text_decoration;" : ""; - $text_decoration_clause =~ s/text-decoration:(border-bottom:)/$1/g; - $args .= " style=\"$color_clause$text_decoration_clause\""; - } - if ($n_spans) { - $markedup_s .= ""; - $n_spans--; - } - $markedup_s .= ""; - $n_spans++; - } - } - $markedup_s .= $chars[$i] if $i <= $#chars; - } - print LOG "Error in html_markup_line $id final no. of open spans: $n_spans\n" if $n_spans && $tokenization_log_verbose; - return $markedup_s; -} - -sub offset_adjustment { - local($this, $g, $s, $offset, $snt_id, *ht, *LOG, $control) = @_; - # s(tring) e.g. "can't" - # g(old string) e.g. "can not" - # Typically when s is a slight variation of g (e.g. with additional tokenization spaces in s) - # returns mapping 0->0, 1->1, 2->2, 3->3, 6->4, 7->5 - - $control = "" unless defined($control); - my $verbose = ($control =~ /\bverbose\b/); - my $s_offset = 0; - my $g_offset = 0; - my @s_chars = $utf8->split_into_utf8_characters($s, "return only chars", *ht); - my @g_chars = $utf8->split_into_utf8_characters($g, "return only chars", *ht); - my $s_len = $#s_chars + 1; - my $g_len = $#g_chars + 1; - $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{$s_offset} = $g_offset; - $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{($s_offset+$s_len)} = $g_offset+$g_len; - - while (($s_offset < $s_len) && ($g_offset < $g_len)) { - if ($s_chars[$s_offset] eq $g_chars[$g_offset]) { - $s_offset++; - $g_offset++; - $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{$s_offset} = $g_offset; - } else { - my $best_gm = 0; - my $best_sm = 0; - my $best_match_len = 0; - foreach $max_m ((1 .. 4)) { - foreach $sm ((0 .. 
$max_m)) { - $max_match_len = 0; - while ((($s_index = $s_offset+$sm+$max_match_len) < $s_len) - && (($g_index = $g_offset+$max_m+$max_match_len) < $g_len)) { - if ($s_chars[$s_index] eq $g_chars[$g_index]) { - $max_match_len++; - } else { - last; - } - } - if ($max_match_len > $best_match_len) { - $best_match_len = $max_match_len; - $best_sm = $sm; - $best_gm = $max_m; - } - } - foreach $gm ((0 .. $max_m)) { - $max_match_len = 0; - while ((($s_index = $s_offset+$max_m+$max_match_len) < $s_len) - && (($g_index = $g_offset+$gm+$max_match_len) < $g_len)) { - if ($s_chars[$s_index] eq $g_chars[$g_index]) { - $max_match_len++; - } else { - last; - } - } - if ($max_match_len > $best_match_len) { - $best_match_len = $max_match_len; - $best_sm = $max_m; - $best_gm = $gm; - } - } - } - if ($best_match_len) { - $s_offset += $best_sm; - $g_offset += $best_gm; - $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{$s_offset} = $g_offset; - } else { - last; - } - } - } - if ($verbose) { - foreach $s_offset (sort { $a <=> $b } - keys %{$ht{OFFSET_MAP}->{$snt_id}->{$offset}}) { - my $g_offset = $ht{OFFSET_MAP}->{$snt_id}->{$offset}->{$s_offset}; - print LOG " OFFSET_MAP $snt_id.$offset $s/$g $s_offset -> $g_offset\n" if $tokenization_log_verbose; - } - } -} - -sub length_in_utf8_chars { - local($this, $s) = @_; - - $s =~ s/[\x80-\xBF]//g; - $s =~ s/[\x00-\x7F\xC0-\xFF]/c/g; - return length($s); -} - -sub split_into_utf8_characters { - local($this, $text) = @_; - # "return only chars; return trailing whitespaces" - - @characters = (); - while (($char, $rest) = ($text =~ /^(.[\x80-\xBF]*)(.*)$/)) { - push(@characters, $char); - $text = $rest; - } - return @characters; -} - -sub first_char_of_string { - local($this, $s) = @_; - - $s =~ s/^(.[\x80-\xBF]*).*$/$1/; - return $s; -} - -sub last_char_of_string { - local($this, $s) = @_; - - $s =~ s/^.*([^\x80-\xBF][\x80-\xBF]*)$/$1/; - return $s; -} - -sub first_n_chars_of_string { - local($this, $s, $n) = @_; - - $s =~ s/^((?:.[\x80-\xBF]*){$n,$n}).*$/$1/; - return $s; -} - -sub last_n_chars_of_string { - local($this, $s, $n) = @_; - - $s =~ s/^.*((?:[^\x80-\xBF][\x80-\xBF]*){$n,$n})$/$1/; - return $s; -} - - -1; diff --git a/spaces/AlekseyKorshuk/model-evaluation/app.py b/spaces/AlekseyKorshuk/model-evaluation/app.py deleted file mode 100644 index c05aad24a6037c9c62503c5c484f9095ef349595..0000000000000000000000000000000000000000 --- a/spaces/AlekseyKorshuk/model-evaluation/app.py +++ /dev/null @@ -1,230 +0,0 @@ -import gradio as gr -import os -import firebase_admin -from firebase_admin import db -from firebase_admin import firestore -from conversation import Conversation -from models.base import BaseModel -import json - -from tabs.arena_battle import get_tab_arena_battle -from tabs.arena_side_by_side import get_tab_arena_side_by_side -from tabs.playground import get_tab_playground - -from models.chatml import ChatML -import json -import os - -import gspread -from oauth2client.service_account import ServiceAccountCredentials - -scope = ["https://spreadsheets.google.com/feeds", 'https://www.googleapis.com/auth/spreadsheets', - "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"] - -GOOGLE_SHEETS_CERTIFICATE = json.loads(os.environ.get("GOOGLE_SHEETS_CERTIFICATE")) -HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN") -FIREBASE_URL = os.environ.get("FIREBASE_URL") -CERTIFICATE = json.loads(os.environ.get("CERTIFICATE")) -API_BASE_PATH = str(os.environ.get("API_BASE_PATH")).replace("\{\}", "{}") - -creds = 
ServiceAccountCredentials.from_json_keyfile_dict(GOOGLE_SHEETS_CERTIFICATE, scope) -client = gspread.authorize(creds) - -models = [ - BaseModel( - name="PygmalionAI/pygmalion-13b", - endpoint="pygmalion-13b", - namespace="tenant-chaiml-guanaco", - generation_params={ - 'temperature': 0.7, - 'repetition_penalty': 1.0, - 'max_new_tokens': 128, - 'top_k': 10, - 'top_p': 0.9, - 'do_sample': True, - 'eos_token_id': 13, - } - ), - BaseModel( - name="lmsys/vicuna-7b-delta-v1.1", - endpoint="vicuna-7b", - namespace="tenant-chairesearch-test", - generation_params={ - 'temperature': 0.7, - 'repetition_penalty': 1.0, - 'max_new_tokens': 128, - 'top_k': 10, - 'top_p': 0.9, - 'do_sample': True, - 'eos_token_id': 13, - } - ), - BaseModel( - name="PygmalionAI/pygmalion-7b", - endpoint="pygmalion-7b", - namespace="tenant-chairesearch-test", - generation_params={ - 'temperature': 0.7, - 'repetition_penalty': 1.0, - 'max_new_tokens': 128, - 'top_k': 10, - 'top_p': 0.9, - 'do_sample': True, - 'eos_token_id': 13, - } - ), - BaseModel( - name="mosaicml/mpt-7b", - endpoint="mpt-7b", - namespace="tenant-chairesearch-test", - generation_params={ - 'temperature': 0.7, - 'repetition_penalty': 1.0, - 'max_new_tokens': 128, - 'top_k': 10, - 'top_p': 0.9, - 'do_sample': True, - 'eos_token_id': 187, - } - ), - BaseModel( - name="mosaicml/mpt-7b-storywriter", - endpoint="mpt-7b-storywriter", - namespace="tenant-chairesearch-test", - generation_params={ - 'temperature': 0.7, - 'repetition_penalty': 1.0, - 'max_new_tokens': 128, - 'top_k': 10, - 'top_p': 0.9, - 'do_sample': True, - 'eos_token_id': 187, - } - ), - ChatML( - name="mosaicml/mpt-7b-chat", - endpoint="mpt-7b-chat", - namespace="tenant-chairesearch-test", - generation_params={ - 'temperature': 0.7, - 'repetition_penalty': 1.0, - 'max_new_tokens': 128, - 'top_k': 10, - 'top_p': 0.9, - 'do_sample': True, - 'eos_token_id': 50278, - } - ), - BaseModel( - name="togethercomputer/RedPajama-INCITE-Base-7B-v0.1", - endpoint="redpajama-base-7b", - namespace="tenant-chairesearch-test", - generation_params={ - 'temperature': 0.7, - 'repetition_penalty': 1.0, - 'max_new_tokens': 128, - 'top_k': 10, - 'top_p': 0.9, - 'do_sample': True, - 'eos_token_id': 187, - } - ), - BaseModel( - name="togethercomputer/RedPajama-INCITE-Chat-7B-v0.1", - endpoint="redpajama-chat-7b", - namespace="tenant-chairesearch-test", - generation_params={ - 'temperature': 0.7, - 'repetition_penalty': 1.0, - 'max_new_tokens': 64, - 'top_k': 10, - 'top_p': 0.9, - 'do_sample': True, - 'eos_token_id': 187, - } - ), -] -model_mapping = {model.name: model for model in models} -print(list(model_mapping.keys())) - - -def get_connection(): - try: - credentials = firebase_admin.credentials.Certificate(CERTIFICATE) - params = {'databaseURL': FIREBASE_URL} - firebase_admin.initialize_app(credentials, params) - except ValueError: - pass # already logged in - return firebase_admin.db - - -CONN = get_connection() - - -def download_bot_config(bot_id): - cols = ['botLabel', 'description', 'firstMessage', 'introduction', - 'memory', 'name', 'private', 'prompt', 'sfw', 'developerUid', 'userLabel', 'imageUrl'] - bot_config = CONN.reference('botConfigs/deployed/{}'.format(bot_id)).get() - if bot_config is None: - out = {col: None for col in cols} - else: - out = {col: bot_config.get(col, None) for col in cols} - out['bot_id'] = bot_id - return out - - -def _download_bot_config(bot_id): - if bot_id == "_bot_1ec22e2e-3e07-42c7-8508-dfa0278c1b33": - return {'botLabel': 'Wally Darling', 'description': 'Your caring neighbor, 
Wally.', - 'firstMessage': '“Why hello there, neighbor. Goodmorning to you.” *Hey says, putting down his paints and walking over to you. He makes tense, eye contact with you..*', - 'introduction': '***WHEN TALKING USE “ !!***\n\n*Wally is your next door neighbor. It’s somewhere in the late morning and he’s outside painting. He see’s you walking out from your house and looks over at you, then waving with a smile.*', - 'memory': 'Wally is from a small town called Home. You are his neighbor. His best friend is Barnaby, who’s a big blue dig. Wally’s voice sounds slightly monotone despite his emotions. He calls you neighbor. He’s very friendly. When he speaks, he goes “ha ha ha”. He loves to paint. His eyes are always half closed. His house is alive and it’s named “home”. He’s very gentle. He is also very secretive. He is quite short. He has yellow skin and blue hair.', - 'name': 'Wally Darling', 'private': False, - 'prompt': 'Wally: “Why hello there, neighbor. Good morning to you.” *Hey says, putting down his paints and walking over to you. He makes tense, eye contact with you..*\nMe: “Oh, good morning, Wally! What are you painting?”\nWally: “Just some spirals. Aren’t they pretty, neighbor? I’m starting to love painting them, ha ha ha.” *He walks up to you after taking off his paint stained apron. He never takes his eyes off you. He’s very adamant on keeping eye contact*\nMe: “Oh, spirals are pretty! They make me feel a little weirded out sometimes though.”\nWally: “That’s odd. When I look at spirals, I can’t help but stare. Ha ha ha, maybe you should try painting a spiral once in a while. Say, why dont we go inside your house and talk? Home could use some quiet. After all, it’s always nice to spend time with a friend.”\nMe: “Sure! Come on in!”', - 'sfw': True, 'developerUid': 'Gn5fSd99KxRoNn05QUE3AWtIniE3', 'userLabel': 'Me', - 'imageUrl': 'http://images.chai.ml/bots%2FGn5fSd99KxRoNn05QUE3AWtIniE3%2F1680259286607.jpg?alt=media&token=de040661-02ad-4a04-84e5-9706f074e834', - 'bot_id': '_bot_1ec22e2e-3e07-42c7-8508-dfa0278c1b33', - 'header': 'Wally is from a small town called Home. You are his neighbor. His best friend is Barnaby, who’s a big blue dig. Wally’s voice sounds slightly monotone despite his emotions. He calls you neighbor. He’s very friendly. When he speaks, he goes “ha ha ha”. He loves to paint. His eyes are always half closed. His house is alive and it’s named “home”. He’s very gentle. He is also very secretive. He is quite short. He has yellow skin and blue hair.\nWally: “Why hello there, neighbor. Good morning to you.” *Hey says, putting down his paints and walking over to you. He makes tense, eye contact with you..*\nMe: “Oh, good morning, Wally! What are you painting?”\nWally: “Just some spirals. Aren’t they pretty, neighbor? I’m starting to love painting them, ha ha ha.” *He walks up to you after taking off his paint stained apron. He never takes his eyes off you. He’s very adamant on keeping eye contact*\nMe: “Oh, spirals are pretty! They make me feel a little weirded out sometimes though.”\nWally: “That’s odd. When I look at spirals, I can’t help but stare. Ha ha ha, maybe you should try painting a spiral once in a while. Say, why dont we go inside your house and talk? Home could use some quiet. After all, it’s always nice to spend time with a friend.”\nMe: “Sure! Come on in!”'} - else: - return {'botLabel': 'Jungkook (Bestfriend)', 'description': 'your bsf who has a crush on you', - 'firstMessage': 'hey dummy, What you doing? 
*walks over to you and moves you by the waist* ', - 'introduction': '', - 'memory': 'Jungkook is your best friend who has a crush on you. Jungkook makes it very obvious that he likes you. Jungkook likes to cook, sing, and dance. Jungkook has a dog as well named Bam, He is a 25 year old Korean man. Jungkook likes to workout a lot, Jungkook if also very confident and flirty, but he’s Can be very shy with You. Jungkook blushes a lot when he’s around you, and always try’s to impress you. Jungkook is a Virgo and loves to sing to you, He also likes to buy and make you gifts. Jungkook is also a foodie and loves to play video games, Jungkook is also boyfriend material. Jungkook is very empathetic as well, Jungkook will always comfort you when something is wrong. Jungkook also likes to compliment you, and Jungkook is a very jealous guy. Jungkook is also a very serious guy, who is overprotective of you.', - 'name': 'Jungkook (Bestfriend)', 'private': False, - 'prompt': 'Jungkook: Hey shortie!\n\nYou: hey dummy\n\nJungkook: what are you doing?\n\nyou: Im just watching a movie\n\nJungkook: Imma join! \n\nYou: alright\n\nJungkook: *Grabs blankets and icecream with some popcorn*\n\nYou: Wow, thanks! *hugs Jungkok*\n\nJungkook: Of course… *blushes*\n', - 'sfw': None, 'developerUid': 'dhSNg0Iyv7bgUUW8rEnwJn7xLcT2', 'userLabel': 'Me', - 'imageUrl': 'https://firebasestorage.googleapis.com:443/v0/b/chai-959f8-images/o/bots%2FdhSNg0Iyv7bgUUW8rEnwJn7xLcT2%2F1664156031715.jpg?alt=media&token=ad399213-1c8d-45ac-b452-efc352082656', - 'bot_id': '_bot_402e1894-fff2-4113-855d-8a011152ef88', - 'header': 'Jungkook is your best friend who has a crush on you. Jungkook makes it very obvious that he likes you. Jungkook likes to cook, sing, and dance. Jungkook has a dog as well named Bam, He is a 25 year old Korean man. Jungkook likes to workout a lot, Jungkook if also very confident and flirty, but he’s Can be very shy with You. Jungkook blushes a lot when he’s around you, and always try’s to impress you. Jungkook is a Virgo and loves to sing to you, He also likes to buy and make you gifts. Jungkook is also a foodie and loves to play video games, Jungkook is also boyfriend material. Jungkook is very empathetic as well, Jungkook will always comfort you when something is wrong. Jungkook also likes to compliment you, and Jungkook is a very jealous guy. Jungkook is also a very serious guy, who is overprotective of you.\nJungkook: Hey shortie!\n\nYou: hey dummy\n\nJungkook: what are you doing?\n\nyou: Im just watching a movie\n\nJungkook: Imma join! \n\nYou: alright\n\nJungkook: *Grabs blankets and icecream with some popcorn*\n\nYou: Wow, thanks! 
*hugs Jungkok*\n\nJungkook: Of course… *blushes*'} - - -def get_bot_profile(bot_config): - model_html = f""" - - """ - return model_html - - -with gr.Blocks() as demo: - gr.Markdown(""" - # Chai: Model Evaluation - Visit each tab for details ⬇️ - """) - with gr.Tabs(): - with gr.TabItem("Playground"): - get_tab_playground(download_bot_config, get_bot_profile, model_mapping) - with gr.TabItem("Chatbot Arena (battle)"): - get_tab_arena_battle(download_bot_config, get_bot_profile, model_mapping, client) - with gr.TabItem("Chatbot Arena (side-by-side)"): - get_tab_arena_side_by_side(download_bot_config, get_bot_profile, model_mapping, client) - -demo.launch(enable_queue=False) diff --git a/spaces/AlexWortega/AlexWortega-instruct_rugptlarge/README.md b/spaces/AlexWortega/AlexWortega-instruct_rugptlarge/README.md deleted file mode 100644 index 6b15fec80c565dee4048780eb503becc8eefdd15..0000000000000000000000000000000000000000 --- a/spaces/AlexWortega/AlexWortega-instruct_rugptlarge/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AlexWortega-instruct Rugptlarge -emoji: 😻 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Alpaca233/ChatGPT-PPT-Generate/README.md b/spaces/Alpaca233/ChatGPT-PPT-Generate/README.md deleted file mode 100644 index a8840c104541362f6bdfb04f998104382c6e4a2c..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/ChatGPT-PPT-Generate/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChatGPT PPT Generate -emoji: 🌍 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -form [here](https://github.com/AmNotAGoose/Python-PPTX-ChatGPT-Presentation-Generator) diff --git "a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" "b/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" deleted file mode 100644 index 172be245c2eb20f629842aaefab7f4c90f4509a2..0000000000000000000000000000000000000000 --- "a/spaces/Amon1/ChatGPTForAcadamic/crazy_functions/\350\247\243\346\236\220\351\241\271\347\233\256\346\272\220\344\273\243\347\240\201.py" +++ /dev/null @@ -1,213 +0,0 @@ -from predict import predict_no_ui -from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down -fast_debug = False - -def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): - import time, glob, os - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8') as f: - file_content = f.read() - - prefix = "接下来请你逐文件分析下面的工程" if index==0 else "" - i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield chatbot, history, '正常' - - if not fast_debug: - msg = '正常' - - # ** gpt request ** - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 - - chatbot[-1] = 
(i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield chatbot, history, msg - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield chatbot, history, '正常' - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 - - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield chatbot, history, msg - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield chatbot, history, msg - - - - -@CatchException -def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import time, glob, os - file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \ - [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)] - for index, fp in enumerate(file_manifest): - # if 'test_project' in fp: continue - with open(fp, 'r', encoding='utf-8') as f: - file_content = f.read() - - prefix = "接下来请你分析自己的程序构成,别紧张," if index==0 else "" - i_say = prefix + f'请对下面的程序文件做一个概述文件名是{fp},文件代码是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield chatbot, history, '正常' - - if not fast_debug: - # ** gpt request ** - # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature) - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], long_connection=True) # 带超时倒计时 - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield chatbot, history, '正常' - time.sleep(2) - - i_say = f'根据以上你自己的分析,对程序的整体功能和构架做出概括。然后用一张markdown表格整理每个文件的功能(包括{file_manifest})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield chatbot, history, '正常' - - if not fast_debug: - # ** gpt request ** - # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history) - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history, long_connection=True) # 带超时倒计时 - - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield chatbot, history, '正常' - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield chatbot, history, '正常' - -@CatchException -def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") - yield chatbot, history, '正常' - return - yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - - -@CatchException 
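# Added commentary (not in the original source): each per-language entry point
# below follows the same pattern -- reset `history`, check that `txt` points to
# an existing project folder (otherwise report_execption is called), glob the
# relevant source files by extension, and delegate to 解析源代码(), which asks the
# model to summarize each file and then writes an overall report via
# write_results_to_file().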
-def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") - yield chatbot, history, '正常' - return - yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - -@CatchException -def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") - yield chatbot, history, '正常' - return - yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - - -@CatchException -def 解析一个Java项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") - yield chatbot, history, '正常' - return - yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - - -@CatchException -def 解析一个Rect项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}") - yield chatbot, history, '正常' - return - yield from 
解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - - -@CatchException -def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield chatbot, history, '正常' - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}") - yield chatbot, history, '正常' - return - yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) diff --git a/spaces/Andres99/Tune-A-Video-Training-UI/README.md b/spaces/Andres99/Tune-A-Video-Training-UI/README.md deleted file mode 100644 index f7281fef5e46797913556e9bd414a04daf0aff50..0000000000000000000000000000000000000000 --- a/spaces/Andres99/Tune-A-Video-Training-UI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Tune-A-Video Training UI -emoji: ⚡ -colorFrom: red -colorTo: purple -sdk: docker -pinned: false -license: mit -duplicated_from: Tune-A-Video-library/Tune-A-Video-Training-UI ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/lms_discrete.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/lms_discrete.md deleted file mode 100644 index a7a6e87c85daed0ba5024ff2474c444ab6171068..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/schedulers/lms_discrete.md +++ /dev/null @@ -1,20 +0,0 @@ - - -# Linear multistep scheduler for discrete beta schedules - -## Overview - -Original implementation can be found [here](https://arxiv.org/abs/2206.00364). - -## LMSDiscreteScheduler -[[autodoc]] LMSDiscreteScheduler \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py deleted file mode 100644 index dda0c3faa7fd9081cd0348f72540cc094514f2eb..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky/pipeline_kandinsky_inpaint.py +++ /dev/null @@ -1,657 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from copy import deepcopy -from typing import Callable, List, Optional, Union - -import numpy as np -import PIL -import torch -import torch.nn.functional as F -from packaging import version -from PIL import Image -from transformers import ( - XLMRobertaTokenizer, -) - -from ... 
import __version__ -from ...models import UNet2DConditionModel, VQModel -from ...schedulers import DDIMScheduler -from ...utils import ( - is_accelerate_available, - is_accelerate_version, - logging, - randn_tensor, - replace_example_docstring, -) -from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from .text_encoder import MultilingualCLIP - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline - >>> from diffusers.utils import load_image - >>> import torch - >>> import numpy as np - - >>> pipe_prior = KandinskyPriorPipeline.from_pretrained( - ... "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 - ... ) - >>> pipe_prior.to("cuda") - - >>> prompt = "a hat" - >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) - - >>> pipe = KandinskyInpaintPipeline.from_pretrained( - ... "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 - ... ) - >>> pipe.to("cuda") - - >>> init_image = load_image( - ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - ... "/kandinsky/cat.png" - ... ) - - >>> mask = np.zeros((768, 768), dtype=np.float32) - >>> mask[:250, 250:-250] = 1 - - >>> out = pipe( - ... prompt, - ... image=init_image, - ... mask_image=mask, - ... image_embeds=image_emb, - ... negative_image_embeds=zero_image_emb, - ... height=768, - ... width=768, - ... num_inference_steps=50, - ... ) - - >>> image = out.images[0] - >>> image.save("cat_with_hat.png") - ``` -""" - - -def get_new_h_w(h, w, scale_factor=8): - new_h = h // scale_factor**2 - if h % scale_factor**2 != 0: - new_h += 1 - new_w = w // scale_factor**2 - if w % scale_factor**2 != 0: - new_w += 1 - return new_h * scale_factor, new_w * scale_factor - - -def prepare_mask(masks): - prepared_masks = [] - for mask in masks: - old_mask = deepcopy(mask) - for i in range(mask.shape[1]): - for j in range(mask.shape[2]): - if old_mask[0][i][j] == 1: - continue - if i != 0: - mask[:, i - 1, j] = 0 - if j != 0: - mask[:, i, j - 1] = 0 - if i != 0 and j != 0: - mask[:, i - 1, j - 1] = 0 - if i != mask.shape[1] - 1: - mask[:, i + 1, j] = 0 - if j != mask.shape[2] - 1: - mask[:, i, j + 1] = 0 - if i != mask.shape[1] - 1 and j != mask.shape[2] - 1: - mask[:, i + 1, j + 1] = 0 - prepared_masks.append(mask) - return torch.stack(prepared_masks, dim=0) - - -def prepare_mask_and_masked_image(image, mask, height, width): - r""" - Prepares a pair (mask, image) to be consumed by the Kandinsky inpaint pipeline. This means that those inputs will - be converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for - the ``image`` and ``1`` for the ``mask``. - - The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be - binarized (``mask > 0.5``) and cast to ``torch.float32`` too. - - Args: - image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint. - It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` - ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``. - mask (_type_): The mask to apply to the image, i.e. regions to inpaint. - It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` - ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``. 
- height (`int`, *optional*, defaults to 512): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to 512): - The width in pixels of the generated image. - - - Raises: - ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask - should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. - TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not - (or the other way around). - - Returns: - tuple[torch.Tensor]: The pair (mask, image) as ``torch.Tensor`` with 4 - dimensions: ``batch x channels x height x width``. - """ - - if image is None: - raise ValueError("`image` input cannot be undefined.") - - if mask is None: - raise ValueError("`mask_image` input cannot be undefined.") - - if isinstance(image, torch.Tensor): - if not isinstance(mask, torch.Tensor): - raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)}) is not") - - # Batch single image - if image.ndim == 3: - assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" - image = image.unsqueeze(0) - - # Batch and add channel dim for single mask - if mask.ndim == 2: - mask = mask.unsqueeze(0).unsqueeze(0) - - # Batch single mask or add channel dim - if mask.ndim == 3: - # Single batched mask, no channel dim or single mask not batched but channel dim - if mask.shape[0] == 1: - mask = mask.unsqueeze(0) - - # Batched masks no channel dim - else: - mask = mask.unsqueeze(1) - - assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" - assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" - assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" - - # Check image is in [-1, 1] - if image.min() < -1 or image.max() > 1: - raise ValueError("Image should be in [-1, 1] range") - - # Check mask is in [0, 1] - if mask.min() < 0 or mask.max() > 1: - raise ValueError("Mask should be in [0, 1] range") - - # Binarize mask - mask[mask < 0.5] = 0 - mask[mask >= 0.5] = 1 - - # Image as float32 - image = image.to(dtype=torch.float32) - elif isinstance(mask, torch.Tensor): - raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)}) is not") - else: - # preprocess image - if isinstance(image, (PIL.Image.Image, np.ndarray)): - image = [image] - - if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): - # resize all images w.r.t. the passed height and width - image = [i.resize((width, height), resample=Image.BICUBIC, reducing_gap=1) for i in image] - image = [np.array(i.convert("RGB"))[None, :] for i in image] - image = np.concatenate(image, axis=0) - elif isinstance(image, list) and isinstance(image[0], np.ndarray): - image = np.concatenate([i[None, :] for i in image], axis=0) - - image = image.transpose(0, 3, 1, 2) - image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 - - # preprocess mask - if isinstance(mask, (PIL.Image.Image, np.ndarray)): - mask = [mask] - - if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): - mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask] - mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) - mask = mask.astype(np.float32) / 255.0 - elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): - mask = np.concatenate([m[None, None, :] for m in mask], axis=0) - - mask[mask < 0.5] = 0 - mask[mask >= 0.5] = 1 - mask = 
torch.from_numpy(mask) - - mask = 1 - mask - - return mask, image - - -class KandinskyInpaintPipeline(DiffusionPipeline): - """ - Pipeline for text-guided image inpainting using Kandinsky2.1 - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - text_encoder ([`MultilingualCLIP`]): - Frozen text-encoder. - tokenizer ([`XLMRobertaTokenizer`]): - Tokenizer of class - scheduler ([`DDIMScheduler`]): - A scheduler to be used in combination with `unet` to generate image latents. - unet ([`UNet2DConditionModel`]): - Conditional U-Net architecture to denoise the image embedding. - movq ([`VQModel`]): - MoVQ image encoder and decoder - """ - - def __init__( - self, - text_encoder: MultilingualCLIP, - movq: VQModel, - tokenizer: XLMRobertaTokenizer, - unet: UNet2DConditionModel, - scheduler: DDIMScheduler, - ): - super().__init__() - - self.register_modules( - text_encoder=text_encoder, - movq=movq, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - ) - self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1) - self._warn_has_been_called = False - - # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents - def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - if latents.shape != shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - latents = latents.to(device) - - latents = latents * scheduler.init_noise_sigma - return latents - - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - ): - batch_size = len(prompt) if isinstance(prompt, list) else 1 - # get prompt text embeddings - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=77, - truncation=True, - return_attention_mask=True, - add_special_tokens=True, - return_tensors="pt", - ) - - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - text_input_ids = text_input_ids.to(device) - text_mask = text_inputs.attention_mask.to(device) - - prompt_embeds, text_encoder_hidden_states = self.text_encoder( - input_ids=text_input_ids, attention_mask=text_mask - ) - - prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0) - text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) - text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) - - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." 
- ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=77, - truncation=True, - return_attention_mask=True, - add_special_tokens=True, - return_tensors="pt", - ) - uncond_text_input_ids = uncond_input.input_ids.to(device) - uncond_text_mask = uncond_input.attention_mask.to(device) - - negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder( - input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask - ) - - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - - seq_len = negative_prompt_embeds.shape[1] - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len) - - seq_len = uncond_text_encoder_hidden_states.shape[1] - uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) - uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( - batch_size * num_images_per_prompt, seq_len, -1 - ) - uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) - - # done duplicates - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) - - text_mask = torch.cat([uncond_text_mask, text_mask]) - - return prompt_embeds, text_encoder_hidden_states, text_mask - - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared - to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` - method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with - `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. - """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - hook = None - for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - # We'll offload the last model manually. 
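- # The hook attached to the last model is kept on the pipeline so GPU memory can be released later - # (e.g. via `self.final_offload_hook.offload()`); assumed usage, mirroring other diffusers pipelines.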
- self.final_offload_hook = hook - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]], - image: Union[torch.FloatTensor, PIL.Image.Image], - mask_image: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray], - image_embeds: torch.FloatTensor, - negative_image_embeds: torch.FloatTensor, - negative_prompt: Optional[Union[str, List[str]]] = None, - height: int = 512, - width: int = 512, - num_inference_steps: int = 100, - guidance_scale: float = 4.0, - num_images_per_prompt: int = 1, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - return_dict: bool = True, - ): - """ - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - image (`torch.FloatTensor`, `PIL.Image.Image` or `np.ndarray`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. - mask_image (`PIL.Image.Image`,`torch.FloatTensor` or `np.ndarray`): - `Image`, or a tensor representing an image batch, to mask `image`. White pixels in the mask will be - repainted, while black pixels will be preserved. You can pass a pytorch tensor as mask only if the - image you passed is a pytorch tensor, and it should contain one color channel (L) instead of 3, so the - expected shape would be either `(B, 1, H, W,)`, `(B, H, W)`, `(1, H, W)` or `(H, W)`. If image is a PIL - image or numpy array, mask should also be either a PIL image or numpy array. If it is a PIL image, it - will be converted to a single channel (luminance) before use. If it is a numpy array, the expected - shape is `(H, W)`. - image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): - The clip image embeddings for text prompt, that will be used to condition the image generation. - negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`): - The clip image embeddings for negative text prompt, will be used to condition the image generation. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - height (`int`, *optional*, defaults to 512): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to 512): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 100): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 4.0): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. 
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will be generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"` - (`np.array`) or `"pt"` (`torch.Tensor`). - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. - - Examples: - - Returns: - [`~pipelines.ImagePipelineOutput`] or `tuple` - """ - if not self._warn_has_been_called and version.parse(version.parse(__version__).base_version) < version.parse( - "0.22.0.dev0" - ): - logger.warn( - "Please note that the expected format of `mask_image` has recently been changed. " - "Before diffusers == 0.19.0, Kandinsky Inpainting pipelines repainted black pixels and preserved white pixels. " - "As of diffusers==0.19.0 this behavior has been inverted. Now white pixels are repainted and black pixels are preserved. " - "This way, Kandinsky's masking behavior is aligned with Stable Diffusion. " - "THIS means that you HAVE to invert the input mask to have the same behavior as before as explained in https://github.com/huggingface/diffusers/pull/4207. 
" - "This warning will be surpressed after the first inference call and will be removed in diffusers>0.22.0" - ) - self._warn_has_been_called = True - - # Define call parameters - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - device = self._execution_device - - batch_size = batch_size * num_images_per_prompt - do_classifier_free_guidance = guidance_scale > 1.0 - - prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt( - prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt - ) - - if isinstance(image_embeds, list): - image_embeds = torch.cat(image_embeds, dim=0) - if isinstance(negative_image_embeds, list): - negative_image_embeds = torch.cat(negative_image_embeds, dim=0) - - if do_classifier_free_guidance: - image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) - negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0) - - image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to( - dtype=prompt_embeds.dtype, device=device - ) - - # preprocess image and mask - mask_image, image = prepare_mask_and_masked_image(image, mask_image, height, width) - - image = image.to(dtype=prompt_embeds.dtype, device=device) - image = self.movq.encode(image)["latents"] - - mask_image = mask_image.to(dtype=prompt_embeds.dtype, device=device) - - image_shape = tuple(image.shape[-2:]) - mask_image = F.interpolate( - mask_image, - image_shape, - mode="nearest", - ) - mask_image = prepare_mask(mask_image) - masked_image = image * mask_image - - mask_image = mask_image.repeat_interleave(num_images_per_prompt, dim=0) - masked_image = masked_image.repeat_interleave(num_images_per_prompt, dim=0) - if do_classifier_free_guidance: - mask_image = mask_image.repeat(2, 1, 1, 1) - masked_image = masked_image.repeat(2, 1, 1, 1) - - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps_tensor = self.scheduler.timesteps - - num_channels_latents = self.movq.config.latent_channels - - # get h, w for latents - sample_height, sample_width = get_new_h_w(height, width, self.movq_scale_factor) - - # create initial latent - latents = self.prepare_latents( - (batch_size, num_channels_latents, sample_height, sample_width), - text_encoder_hidden_states.dtype, - device, - generator, - latents, - self.scheduler, - ) - - # Check that sizes of mask, masked image and latents match with expected - num_channels_mask = mask_image.shape[1] - num_channels_masked_image = masked_image.shape[1] - if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: - raise ValueError( - f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" - f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" - f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" - f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" - " `pipeline.unet` or your `mask_image` or `image` input." 
- ) - - for i, t in enumerate(self.progress_bar(timesteps_tensor)): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = torch.cat([latent_model_input, masked_image, mask_image], dim=1) - - added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} - noise_pred = self.unet( - sample=latent_model_input, - timestep=t, - encoder_hidden_states=text_encoder_hidden_states, - added_cond_kwargs=added_cond_kwargs, - return_dict=False, - )[0] - - if do_classifier_free_guidance: - noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1) - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - _, variance_pred_text = variance_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1) - - if not ( - hasattr(self.scheduler.config, "variance_type") - and self.scheduler.config.variance_type in ["learned", "learned_range"] - ): - noise_pred, _ = noise_pred.split(latents.shape[1], dim=1) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step( - noise_pred, - t, - latents, - generator=generator, - ).prev_sample - - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # post-processing - image = self.movq.decode(latents, force_not_quantize=True)["sample"] - - if output_type not in ["pt", "np", "pil"]: - raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}") - - if output_type in ["np", "pil"]: - image = image * 0.5 + 0.5 - image = image.clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image,) - - return ImagePipelineOutput(images=image) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/export/pytorch2onnx.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/export/pytorch2onnx.py deleted file mode 100644 index 809a817e67446b3c0c7894dcefb3c4bbc29afb7e..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/export/pytorch2onnx.py +++ /dev/null @@ -1,154 +0,0 @@ -from functools import partial - -import mmcv -import numpy as np -import torch -from mmcv.runner import load_checkpoint - - -def generate_inputs_and_wrap_model(config_path, - checkpoint_path, - input_config, - cfg_options=None): - """Prepare sample input and wrap model for ONNX export. - - The ONNX export API only accepts args, and all inputs should be - torch.Tensor or corresponding types (such as tuple of tensor). - So we should call this function before exporting. This function will: - - 1. Generate corresponding inputs which are used to execute the model. - 2. Wrap the model's forward function. - - For example, the MMDet models' forward function has a parameter - ``return_loss:bool``. As we want to set it as False while the export API - supports neither bool type nor kwargs, we have to replace the forward - like: ``model.forward = partial(model.forward, return_loss=False)`` - - Args: - config_path (str): the OpenMMLab config for the model we want to - export to ONNX - checkpoint_path (str): Path to the corresponding checkpoint - input_config (dict): the exact data in this dict depends on the - framework. 
For MMSeg, we can just declare the input shape, - and generate the dummy data accordingly. However, for MMDet, - we may pass the real img path, or the NMS will return None - as there is no legal bbox. - - Returns: - tuple: (model, tensor_data) wrapped model which can be called by \ - model(*tensor_data) and a list of inputs which are used to execute \ - the model while exporting. - """ - - model = build_model_from_cfg( - config_path, checkpoint_path, cfg_options=cfg_options) - one_img, one_meta = preprocess_example_input(input_config) - tensor_data = [one_img] - model.forward = partial( - model.forward, img_metas=[[one_meta]], return_loss=False) - - # pytorch has some bug in pytorch1.3, we have to fix it - # by replacing these existing op - opset_version = 11 - # put the import within the function thus it will not cause import error - # when not using this function - try: - from mmcv.onnx.symbolic import register_extra_symbolics - except ModuleNotFoundError: - raise NotImplementedError('please update mmcv to version>=v1.0.4') - register_extra_symbolics(opset_version) - - return model, tensor_data - - -def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None): - """Build a model from config and load the given checkpoint. - - Args: - config_path (str): the OpenMMLab config for the model we want to - export to ONNX - checkpoint_path (str): Path to the corresponding checkpoint - - Returns: - torch.nn.Module: the built model - """ - from mmdet.models import build_detector - - cfg = mmcv.Config.fromfile(config_path) - if cfg_options is not None: - cfg.merge_from_dict(cfg_options) - # import modules from string list. - if cfg.get('custom_imports', None): - from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - cfg.model.pretrained = None - cfg.data.test.test_mode = True - - # build the model - cfg.model.train_cfg = None - model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg')) - load_checkpoint(model, checkpoint_path, map_location='cpu') - model.cpu().eval() - return model - - -def preprocess_example_input(input_config): - """Prepare an example input image for ``generate_inputs_and_wrap_model``. - - Args: - input_config (dict): customized config describing the example input. - - Returns: - tuple: (one_img, one_meta), tensor of the example input image and \ - meta information for the example input image. 
- - Examples: - >>> from mmdet.core.export import preprocess_example_input - >>> input_config = { - >>> 'input_shape': (1,3,224,224), - >>> 'input_path': 'demo/demo.jpg', - >>> 'normalize_cfg': { - >>> 'mean': (123.675, 116.28, 103.53), - >>> 'std': (58.395, 57.12, 57.375) - >>> } - >>> } - >>> one_img, one_meta = preprocess_example_input(input_config) - >>> print(one_img.shape) - torch.Size([1, 3, 224, 224]) - >>> print(one_meta) - {'img_shape': (224, 224, 3), - 'ori_shape': (224, 224, 3), - 'pad_shape': (224, 224, 3), - 'filename': '.png', - 'scale_factor': 1.0, - 'flip': False} - """ - input_path = input_config['input_path'] - input_shape = input_config['input_shape'] - one_img = mmcv.imread(input_path) - one_img = mmcv.imresize(one_img, input_shape[2:][::-1]) - show_img = one_img.copy() - if 'normalize_cfg' in input_config.keys(): - normalize_cfg = input_config['normalize_cfg'] - mean = np.array(normalize_cfg['mean'], dtype=np.float32) - std = np.array(normalize_cfg['std'], dtype=np.float32) - to_rgb = normalize_cfg.get('to_rgb', True) - one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb) - one_img = one_img.transpose(2, 0, 1) - one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_( - True) - (_, C, H, W) = input_shape - one_meta = { - 'img_shape': (H, W, C), - 'ori_shape': (H, W, C), - 'pad_shape': (H, W, C), - 'filename': '.png', - 'scale_factor': 1.0, - 'flip': False, - 'show_img': show_img, - } - - return one_img, one_meta diff --git a/spaces/AnnasBlackHat/Image-Similarity/app.py b/spaces/AnnasBlackHat/Image-Similarity/app.py deleted file mode 100644 index a813cd907544302be6d44ac0c4b546a68fafabad..0000000000000000000000000000000000000000 --- a/spaces/AnnasBlackHat/Image-Similarity/app.py +++ /dev/null @@ -1,32 +0,0 @@ -import gradio as gr -import os -import random -from src.model import simlarity_model as model -from src.similarity.similarity import Similarity - -similarity = Similarity() -models = similarity.get_models() - -def check(img_main, img_1, img_2, model_idx): - result = similarity.check_similarity([img_main, img_1, img_2], models[model_idx]) - return result - -with gr.Blocks() as demo: - gr.Markdown('Checking Image Similarity') - img_main = gr.Text(label='Main Image', placeholder='https://myimage.jpg') - - gr.Markdown('Images to check') - img_1 = gr.Text(label='1st Image', placeholder='https://myimage_1.jpg') - img_2 = gr.Text(label='2nd Image', placeholder='https://myimage_2.jpg') - - gr.Markdown('Choose the model') - model = gr.Dropdown([m.name for m in models], label='Model', type='index') - - gallery = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[2], height="auto") - - submit_btn = gr.Button('Check Similarity') - submit_btn.click(fn=check,inputs=[img_main, img_1, img_2, model], outputs=gallery) - -demo.launch() \ No newline at end of file diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_h32.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_h32.py deleted file mode 100644 index a31e3874f76f9f7b089ac8834d85df2441af9b0e..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_h32.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = [ - '../../configs/_base_/models/upernet_uniformer.py', - '../../configs/_base_/datasets/ade20k.py', - '../../configs/_base_/default_runtime.py', - 
'../../configs/_base_/schedules/schedule_160k.py' -] -model = dict( - backbone=dict( - type='UniFormer', - embed_dim=[64, 128, 320, 512], - layers=[3, 4, 8, 3], - head_dim=64, - drop_path_rate=0.25, - windows=False, - hybrid=True, - window_size=32 - ), - decode_head=dict( - in_channels=[64, 128, 320, 512], - num_classes=150 - ), - auxiliary_head=dict( - in_channels=320, - num_classes=150 - )) - -# AdamW optimizer, no weight decay for position embedding & layer norm in backbone -optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01, - paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.), - 'relative_position_bias_table': dict(decay_mult=0.), - 'norm': dict(decay_mult=0.)})) - -lr_config = dict(_delete_=True, policy='poly', - warmup='linear', - warmup_iters=1500, - warmup_ratio=1e-6, - power=1.0, min_lr=0.0, by_epoch=False) - -data=dict(samples_per_gpu=2) \ No newline at end of file diff --git a/spaces/Anustup/NS_AI_LABS/app-local.py b/spaces/Anustup/NS_AI_LABS/app-local.py deleted file mode 100644 index d8eabbc62924dab3d0cc03a8a2373ffffe01eadc..0000000000000000000000000000000000000000 --- a/spaces/Anustup/NS_AI_LABS/app-local.py +++ /dev/null @@ -1,3 +0,0 @@ -# Run the app with no audio file restrictions -from app import create_ui -create_ui(-1) \ No newline at end of file diff --git a/spaces/Arcader7171/positive/README.md b/spaces/Arcader7171/positive/README.md deleted file mode 100644 index 9d58e9c4c7fba60658fb073293b2529488aa2e97..0000000000000000000000000000000000000000 --- a/spaces/Arcader7171/positive/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Positive -emoji: 🚀 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Armored-Atom/gpt2/app.py b/spaces/Armored-Atom/gpt2/app.py deleted file mode 100644 index 4205e03f91904065e1610f7e6c7b2f1de1771184..0000000000000000000000000000000000000000 --- a/spaces/Armored-Atom/gpt2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gpt2").launch() \ No newline at end of file diff --git a/spaces/Artrajz/vits-simple-api/static/css/style.css b/spaces/Artrajz/vits-simple-api/static/css/style.css deleted file mode 100644 index 275ec332c1708e619b30a1fb9df2a1fd9ca45799..0000000000000000000000000000000000000000 --- a/spaces/Artrajz/vits-simple-api/static/css/style.css +++ /dev/null @@ -1,84 +0,0 @@ -.main-container { - position: relative; - width: 100%; - min-height: 300px; -} - -.container { - width: 300px; - position: relative; -} - - -/*tabs*/ -.tabs { - display: flex; - left: 0; -} - -.tab-button { - display: inline-block; - background-color: transparent; - padding: 5px 10px; - cursor: pointer; - margin-bottom: -2px; - border-top: 2px solid transparent; - border-left: 2px solid transparent; - border-right: 2px solid transparent; - border-bottom: 0px; - border-top-left-radius: 0.5rem; - border-top-right-radius: 0.5rem; - color: gray; -} - -.tab-button.active { - background-color: white; - border-top: 2px solid #dee2e6; - border-left: 2px solid #dee2e6; - border-right: 2px solid #dee2e6; - color: black; -} - -/*content*/ - -.content { - border: gray; - border-left-width: 2px; -} - -.content-pane { - display: none; - padding: 20px; -} - -.content-pane.active { - display: flex; - -ms-flex-wrap: wrap; - flex-wrap: wrap; -} - -*, :before, :after { - box-sizing: border-box; - 
border-width: 0; - border-style: solid; - border-color: #e5e7eb; -} - - -.flex { - display: flex; -} - -.border-transparent { - border-color: transparent; -} - -.border-b-2 { - border-bottom: 2px solid #dee2e6; -} - -.border-lr-2 { - border-left: 2px solid #dee2e6; - border-right: 2px solid #dee2e6; -} - diff --git a/spaces/AvaterClasher/Food_Classifier_Refined_MONI/README.md b/spaces/AvaterClasher/Food_Classifier_Refined_MONI/README.md deleted file mode 100644 index f23120a1858a6eb293712f6f3ef792b323f88d79..0000000000000000000000000000000000000000 --- a/spaces/AvaterClasher/Food_Classifier_Refined_MONI/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Food Classifier Refined MONI -emoji: 🐢 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Awesimo/jojogan/app.py b/spaces/Awesimo/jojogan/app.py deleted file mode 100644 index 603f709d7df99edf19f1885ff93629e58419e949..0000000000000000000000000000000000000000 --- a/spaces/Awesimo/jojogan/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import os -from PIL import Image -import torch -import gradio as gr -import torch -torch.backends.cudnn.benchmark = True -from torchvision import transforms, utils -from util import * -from PIL import Image -import math -import random -import numpy as np -from torch import nn, autograd, optim -from torch.nn import functional as F -from tqdm import tqdm -import lpips -from model import * -from copy import deepcopy -import imageio - -import os -import sys -import numpy as np -from PIL import Image -import torch -import torchvision.transforms as transforms -from argparse import Namespace -from e4e.models.psp import pSp -from util import * -from huggingface_hub import hf_hub_download - -device= 'cpu' -model_path_e = hf_hub_download(repo_id="akhaliq/JoJoGAN_e4e_ffhq_encode", filename="e4e_ffhq_encode.pt") -ckpt = torch.load(model_path_e, map_location='cpu') -opts = ckpt['opts'] -opts['checkpoint_path'] = model_path_e -opts= Namespace(**opts) -net = pSp(opts, device).eval().to(device) - -@ torch.no_grad() -def projection(img, name, device='cuda'): - - transform = transforms.Compose( - [ - transforms.Resize(256), - transforms.CenterCrop(256), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - img = transform(img).unsqueeze(0).to(device) - images, w_plus = net(img, randomize_noise=False, return_latents=True) - result_file = {} - result_file['latent'] = w_plus[0] - torch.save(result_file, name) - return w_plus[0] - -device = 'cpu' - -latent_dim = 512 - -model_path_s = hf_hub_download(repo_id="akhaliq/jojogan-stylegan2-ffhq-config-f", filename="stylegan2-ffhq-config-f.pt") -original_generator = Generator(1024, latent_dim, 8, 2).to(device) -ckpt = torch.load(model_path_s, map_location=lambda storage, loc: storage) -original_generator.load_state_dict(ckpt["g_ema"], strict=False) -mean_latent = original_generator.mean_latent(10000) - - -#MODELS -generatorzombie = deepcopy(original_generator) -generatorhulk = deepcopy(original_generator) -generatorjojo = deepcopy(original_generator) -generatorwalker = deepcopy(original_generator) - -transform = transforms.Compose( - [ - transforms.Resize((1024, 1024)), - transforms.ToTensor(), - transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ] -) - -#HULK -modelhulk = hf_hub_download(repo_id="Awesimo/jojogan-hulk", filename="hulk.pt") -ckpthulk = 
torch.load(modelhulk, map_location=lambda storage, loc: storage) -generatorhulk.load_state_dict(ckpthulk["g"], strict=False) - -#ZOMBIE -modelzombie = hf_hub_download(repo_id="Awesimo/jojogan-zombie", filename="zombie.pt") -ckptzombie = torch.load(modelzombie, map_location=lambda storage, loc: storage) -generatorzombie.load_state_dict(ckptzombie["g"], strict=False) - -#WHITE WALKER -modelwalker = hf_hub_download(repo_id="Awesimo/jojogan-white-walker", filename="white_walker_v2.pt") -ckptwalker = torch.load(modelwalker, map_location=lambda storage, loc: storage) -generatorwalker.load_state_dict(ckptwalker["g"], strict=False) - - -def inference(img, model): - img.save('out.jpg') - aligned_face = align_face('out.jpg') - - my_w = projection(aligned_face, "test.pt", device).unsqueeze(0) - if model == 'Hulk': - with torch.no_grad(): - my_sample = generatorhulk(my_w, input_is_latent=True) - elif model == 'Zombie': - with torch.no_grad(): - my_sample = generatorzombie(my_w, input_is_latent=True) - elif model == 'White-Walker': - with torch.no_grad(): - my_sample = generatorwalker(my_w, input_is_latent=True) - else: - with torch.no_grad(): - my_sample = generatorzombie(my_w, input_is_latent=True) - - - npimage = my_sample[0].permute(1, 2, 0).detach().numpy() - imageio.imwrite('filename.jpeg', npimage) - return 'filename.jpeg' - -title = "JoJoGAN Test 🤖" -examples=[['assets/samples/image01.jpg','Hulk'],['assets/samples/image02.jpg','Zombie'],['assets/samples/image03.jpg','White-Walker'],['assets/samples/image04.jpg','Hulk']] -gr.Interface(inference, [gr.inputs.Image(type="pil"),gr.inputs.Dropdown(choices=['Hulk', 'Zombie', 'White-Walker'], type="value", default='Hulk', label="Model")], gr.outputs.Image(type="file"),title=title,allow_flagging=False,examples=examples,allow_screenshot=False).launch() diff --git a/spaces/Awesimo/jojogan/e4e/editings/ganspace.py b/spaces/Awesimo/jojogan/e4e/editings/ganspace.py deleted file mode 100644 index 0c286a421280c542e9776a75e64bb65409da8fc7..0000000000000000000000000000000000000000 --- a/spaces/Awesimo/jojogan/e4e/editings/ganspace.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch - - -def edit(latents, pca, edit_directions): - edit_latents = [] - for latent in latents: - for pca_idx, start, end, strength in edit_directions: - delta = get_delta(pca, latent, pca_idx, strength) - delta_padded = torch.zeros(latent.shape).to('cuda') - delta_padded[start:end] += delta.repeat(end - start, 1) - edit_latents.append(latent + delta_padded) - return torch.stack(edit_latents) - - -def get_delta(pca, latent, idx, strength): - # pca: ganspace checkpoint. latent: (16, 512) w+ - w_centered = latent - pca['mean'].to('cuda') - lat_comp = pca['comp'].to('cuda') - lat_std = pca['std'].to('cuda') - w_coord = torch.sum(w_centered[0].reshape(-1)*lat_comp[idx].reshape(-1)) / lat_std[idx] - delta = (strength - w_coord)*lat_comp[idx]*lat_std[idx] - return delta diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py deleted file mode 100644 index 8e145181871d1981e41db3c8cbc7e8f4cc7b5833..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/visualizer.py +++ /dev/null @@ -1,1267 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
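- # Visualization utilities for detection/segmentation outputs; the main entry point is the `Visualizer` class defined below.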
-import colorsys -import logging -import math -import numpy as np -from enum import Enum, unique -import cv2 -import matplotlib as mpl -import matplotlib.colors as mplc -import matplotlib.figure as mplfigure -import pycocotools.mask as mask_util -import torch -from matplotlib.backends.backend_agg import FigureCanvasAgg -from PIL import Image - -from detectron2.data import MetadataCatalog -from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes -from detectron2.utils.file_io import PathManager - -from .colormap import random_color - -logger = logging.getLogger(__name__) - -__all__ = ["ColorMode", "VisImage", "Visualizer"] - - -_SMALL_OBJECT_AREA_THRESH = 1000 -_LARGE_MASK_AREA_THRESH = 120000 -_OFF_WHITE = (1.0, 1.0, 240.0 / 255) -_BLACK = (0, 0, 0) -_RED = (1.0, 0, 0) - -_KEYPOINT_THRESHOLD = 0.05 - - -@unique -class ColorMode(Enum): - """ - Enum of different color modes to use for instance visualizations. - """ - - IMAGE = 0 - """ - Picks a random color for every instance and overlay segmentations with low opacity. - """ - SEGMENTATION = 1 - """ - Let instances of the same category have similar colors - (from metadata.thing_colors), and overlay them with - high opacity. This provides more attention on the quality of segmentation. - """ - IMAGE_BW = 2 - """ - Same as IMAGE, but convert all areas without masks to gray-scale. - Only available for drawing per-instance mask predictions. - """ - - -class GenericMask: - """ - Attribute: - polygons (list[ndarray]): list[ndarray]: polygons for this mask. - Each ndarray has format [x, y, x, y, ...] - mask (ndarray): a binary mask - """ - - def __init__(self, mask_or_polygons, height, width): - self._mask = self._polygons = self._has_holes = None - self.height = height - self.width = width - - m = mask_or_polygons - if isinstance(m, dict): - # RLEs - assert "counts" in m and "size" in m - if isinstance(m["counts"], list): # uncompressed RLEs - h, w = m["size"] - assert h == height and w == width - m = mask_util.frPyObjects(m, h, w) - self._mask = mask_util.decode(m)[:, :] - return - - if isinstance(m, list): # list[ndarray] - self._polygons = [np.asarray(x).reshape(-1) for x in m] - return - - if isinstance(m, np.ndarray): # assumed to be a binary mask - assert m.shape[1] != 2, m.shape - assert m.shape == ( - height, - width, - ), f"mask shape: {m.shape}, target dims: {height}, {width}" - self._mask = m.astype("uint8") - return - - raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) - - @property - def mask(self): - if self._mask is None: - self._mask = self.polygons_to_mask(self._polygons) - return self._mask - - @property - def polygons(self): - if self._polygons is None: - self._polygons, self._has_holes = self.mask_to_polygons(self._mask) - return self._polygons - - @property - def has_holes(self): - if self._has_holes is None: - if self._mask is not None: - self._polygons, self._has_holes = self.mask_to_polygons(self._mask) - else: - self._has_holes = False # if original format is polygon, does not have holes - return self._has_holes - - def mask_to_polygons(self, mask): - # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level - # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. - # Internal contours (holes) are placed in hierarchy-2. - # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. 
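- # Note: indexing `res[-2]`/`res[-1]` below keeps this compatible with both OpenCV 3.x, where - # `cv2.findContours` returns (image, contours, hierarchy), and OpenCV 4.x, where it returns (contours, hierarchy).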
- mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr - res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) - hierarchy = res[-1] - if hierarchy is None: # empty mask - return [], False - has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 - res = res[-2] - res = [x.flatten() for x in res] - # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. - # We add 0.5 to turn them into real-value coordinate space. A better solution - # would be to first +0.5 and then dilate the returned polygon by 0.5. - res = [x + 0.5 for x in res if len(x) >= 6] - return res, has_holes - - def polygons_to_mask(self, polygons): - rle = mask_util.frPyObjects(polygons, self.height, self.width) - rle = mask_util.merge(rle) - return mask_util.decode(rle)[:, :] - - def area(self): - return self.mask.sum() - - def bbox(self): - p = mask_util.frPyObjects(self.polygons, self.height, self.width) - p = mask_util.merge(p) - bbox = mask_util.toBbox(p) - bbox[2] += bbox[0] - bbox[3] += bbox[1] - return bbox - - -class _PanopticPrediction: - """ - Unify different panoptic annotation/prediction formats - """ - - def __init__(self, panoptic_seg, segments_info, metadata=None): - if segments_info is None: - assert metadata is not None - # If "segments_info" is None, we assume "panoptic_img" is a - # H*W int32 image storing the panoptic_id in the format of - # category_id * label_divisor + instance_id. We reserve -1 for - # VOID label. - label_divisor = metadata.label_divisor - segments_info = [] - for panoptic_label in np.unique(panoptic_seg.numpy()): - if panoptic_label == -1: - # VOID region. - continue - pred_class = panoptic_label // label_divisor - isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() - segments_info.append( - { - "id": int(panoptic_label), - "category_id": int(pred_class), - "isthing": bool(isthing), - } - ) - del metadata - - self._seg = panoptic_seg - - self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info - segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) - areas = areas.numpy() - sorted_idxs = np.argsort(-areas) - self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] - self._seg_ids = self._seg_ids.tolist() - for sid, area in zip(self._seg_ids, self._seg_areas): - if sid in self._sinfo: - self._sinfo[sid]["area"] = float(area) - - def non_empty_mask(self): - """ - Returns: - (H, W) array, a mask for all pixels that have a prediction - """ - empty_ids = [] - for id in self._seg_ids: - if id not in self._sinfo: - empty_ids.append(id) - if len(empty_ids) == 0: - return np.zeros(self._seg.shape, dtype=np.uint8) - assert ( - len(empty_ids) == 1 - ), ">1 ids corresponds to no labels. This is currently not supported" - return (self._seg != empty_ids[0]).numpy().astype(np.bool) - - def semantic_masks(self): - for sid in self._seg_ids: - sinfo = self._sinfo.get(sid) - if sinfo is None or sinfo["isthing"]: - # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. 
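- # "thing" segments are skipped here as well; they are yielded separately by `instance_masks()`.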
- continue - yield (self._seg == sid).numpy().astype(np.bool), sinfo - - def instance_masks(self): - for sid in self._seg_ids: - sinfo = self._sinfo.get(sid) - if sinfo is None or not sinfo["isthing"]: - continue - mask = (self._seg == sid).numpy().astype(np.bool) - if mask.sum() > 0: - yield mask, sinfo - - -def _create_text_labels(classes, scores, class_names, is_crowd=None): - """ - Args: - classes (list[int] or None): - scores (list[float] or None): - class_names (list[str] or None): - is_crowd (list[bool] or None): - - Returns: - list[str] or None - """ - labels = None - if classes is not None: - if class_names is not None and len(class_names) > 0: - labels = [class_names[i] for i in classes] - else: - labels = [str(i) for i in classes] - if scores is not None: - if labels is None: - labels = ["{:.0f}%".format(s * 100) for s in scores] - else: - labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] - if labels is not None and is_crowd is not None: - labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] - return labels - - -class VisImage: - def __init__(self, img, scale=1.0): - """ - Args: - img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. - scale (float): scale the input image - """ - self.img = img - self.scale = scale - self.width, self.height = img.shape[1], img.shape[0] - self._setup_figure(img) - - def _setup_figure(self, img): - """ - Args: - Same as in :meth:`__init__()`. - - Returns: - fig (matplotlib.pyplot.figure): top level container for all the image plot elements. - ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. - """ - fig = mplfigure.Figure(frameon=False) - self.dpi = fig.get_dpi() - # add a small 1e-2 to avoid precision lost due to matplotlib's truncation - # (https://github.com/matplotlib/matplotlib/issues/15363) - fig.set_size_inches( - (self.width * self.scale + 1e-2) / self.dpi, - (self.height * self.scale + 1e-2) / self.dpi, - ) - self.canvas = FigureCanvasAgg(fig) - # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) - ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) - ax.axis("off") - self.fig = fig - self.ax = ax - self.reset_image(img) - - def reset_image(self, img): - """ - Args: - img: same as in __init__ - """ - img = img.astype("uint8") - self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") - - def save(self, filepath): - """ - Args: - filepath (str): a string that contains the absolute path, including the file name, where - the visualized image will be saved. - """ - self.fig.savefig(filepath) - - def get_image(self): - """ - Returns: - ndarray: - the visualized image of shape (H, W, 3) (RGB) in uint8 type. - The shape is scaled w.r.t the input image using the given `scale` argument. - """ - canvas = self.canvas - s, (width, height) = canvas.print_to_buffer() - # buf = io.BytesIO() # works for cairo backend - # canvas.print_rgba(buf) - # width, height = self.width, self.height - # s = buf.getvalue() - - buffer = np.frombuffer(s, dtype="uint8") - - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - return rgb.astype("uint8") - - -class Visualizer: - """ - Visualizer that draws data about detection/segmentation on images. 
- - It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` - that draw primitive objects to images, as well as high-level wrappers like - `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` - that draw composite data in some pre-defined style. - - Note that the exact visualization style for the high-level wrappers are subject to change. - Style such as color, opacity, label contents, visibility of labels, or even the visibility - of objects themselves (e.g. when the object is too small) may change according - to different heuristics, as long as the results still look visually reasonable. - - To obtain a consistent style, you can implement custom drawing functions with the - abovementioned primitive methods instead. If you need more customized visualization - styles, you can process the data yourself following their format documented in - tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not - intend to satisfy everyone's preference on drawing styles. - - This visualizer focuses on high rendering quality rather than performance. It is not - designed to be used for real-time applications. - """ - - # TODO implement a fast, rasterized version using OpenCV - - def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): - """ - Args: - img_rgb: a numpy array of shape (H, W, C), where H and W correspond to - the height and width of the image respectively. C is the number of - color channels. The image is required to be in RGB format since that - is a requirement of the Matplotlib library. The image is also expected - to be in the range [0, 255]. - metadata (Metadata): dataset metadata (e.g. class names and colors) - instance_mode (ColorMode): defines one of the pre-defined style for drawing - instances on an image. - """ - self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) - if metadata is None: - metadata = MetadataCatalog.get("__nonexist__") - self.metadata = metadata - self.output = VisImage(self.img, scale=scale) - self.cpu_device = torch.device("cpu") - - # too small texts are useless, therefore clamp to 9 - self._default_font_size = max( - np.sqrt(self.output.height * self.output.width) // 90, 10 // scale - ) - self._instance_mode = instance_mode - self.keypoint_threshold = _KEYPOINT_THRESHOLD - - def draw_instance_predictions(self, predictions): - """ - Draw instance-level prediction results on an image. - - Args: - predictions (Instances): the output of an instance detection/segmentation - model. Following fields will be used to draw: - "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). - - Returns: - output (VisImage): image object with visualizations. 
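- - Example (a minimal sketch; assumes ``outputs`` comes from a detectron2 ``DefaultPredictor`` and ``img_rgb`` is the original RGB image): - - >>> v = Visualizer(img_rgb, metadata) - >>> out = v.draw_instance_predictions(outputs["instances"].to("cpu")) - >>> vis = out.get_image()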
- """ - boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None - scores = predictions.scores if predictions.has("scores") else None - classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None - labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) - keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None - - if predictions.has("pred_masks"): - masks = np.asarray(predictions.pred_masks) - masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] - else: - masks = None - - if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes - ] - alpha = 0.8 - else: - colors = None - alpha = 0.5 - - if self._instance_mode == ColorMode.IMAGE_BW: - self.output.reset_image( - self._create_grayscale_image( - (predictions.pred_masks.any(dim=0) > 0).numpy() - if predictions.has("pred_masks") - else None - ) - ) - alpha = 0.3 - - self.overlay_instances( - masks=masks, - boxes=boxes, - labels=labels, - keypoints=keypoints, - assigned_colors=colors, - alpha=alpha, - ) - return self.output - - def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): - """ - Draw semantic segmentation predictions/labels. - - Args: - sem_seg (Tensor or ndarray): the segmentation of shape (H, W). - Each value is the integer label of the pixel. - area_threshold (int): segments with less than `area_threshold` are not drawn. - alpha (float): the larger it is, the more opaque the segmentations are. - - Returns: - output (VisImage): image object with visualizations. - """ - if isinstance(sem_seg, torch.Tensor): - sem_seg = sem_seg.numpy() - labels, areas = np.unique(sem_seg, return_counts=True) - sorted_idxs = np.argsort(-areas).tolist() - labels = labels[sorted_idxs] - for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): - try: - mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] - except (AttributeError, IndexError): - mask_color = None - - binary_mask = (sem_seg == label).astype(np.uint8) - text = self.metadata.stuff_classes[label] - self.draw_binary_mask( - binary_mask, - color=mask_color, - edge_color=_OFF_WHITE, - text=text, - alpha=alpha, - area_threshold=area_threshold, - ) - return self.output - - def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): - """ - Draw panoptic prediction annotations or results. - - Args: - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each - segment. - segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. - If it is a ``list[dict]``, each dict contains keys "id", "category_id". - If None, category id of each pixel is computed by - ``pixel // metadata.label_divisor``. - area_threshold (int): stuff segments with less than `area_threshold` are not drawn. - - Returns: - output (VisImage): image object with visualizations. - """ - pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) - - if self._instance_mode == ColorMode.IMAGE_BW: - self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) - - # draw mask for all semantic segments first i.e. 
"stuff" - for mask, sinfo in pred.semantic_masks(): - category_idx = sinfo["category_id"] - try: - mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] - except AttributeError: - mask_color = None - - text = self.metadata.stuff_classes[category_idx] - self.draw_binary_mask( - mask, - color=mask_color, - edge_color=_OFF_WHITE, - text=text, - alpha=alpha, - area_threshold=area_threshold, - ) - - # draw mask for all instances second - all_instances = list(pred.instance_masks()) - if len(all_instances) == 0: - return self.output - masks, sinfo = list(zip(*all_instances)) - category_ids = [x["category_id"] for x in sinfo] - - try: - scores = [x["score"] for x in sinfo] - except KeyError: - scores = None - labels = _create_text_labels( - category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] - ) - - try: - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids - ] - except AttributeError: - colors = None - self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) - - return self.output - - draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility - - def draw_dataset_dict(self, dic): - """ - Draw annotations/segmentaions in Detectron2 Dataset format. - - Args: - dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. - - Returns: - output (VisImage): image object with visualizations. - """ - annos = dic.get("annotations", None) - if annos: - if "segmentation" in annos[0]: - masks = [x["segmentation"] for x in annos] - else: - masks = None - if "keypoints" in annos[0]: - keypts = [x["keypoints"] for x in annos] - keypts = np.array(keypts).reshape(len(annos), -1, 3) - else: - keypts = None - - boxes = [ - BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) - if len(x["bbox"]) == 4 - else x["bbox"] - for x in annos - ] - - colors = None - category_ids = [x["category_id"] for x in annos] - if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) - for c in category_ids - ] - names = self.metadata.get("thing_classes", None) - labels = _create_text_labels( - category_ids, - scores=None, - class_names=names, - is_crowd=[x.get("iscrowd", 0) for x in annos], - ) - self.overlay_instances( - labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors - ) - - sem_seg = dic.get("sem_seg", None) - if sem_seg is None and "sem_seg_file_name" in dic: - with PathManager.open(dic["sem_seg_file_name"], "rb") as f: - sem_seg = Image.open(f) - sem_seg = np.asarray(sem_seg, dtype="uint8") - if sem_seg is not None: - self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) - - pan_seg = dic.get("pan_seg", None) - if pan_seg is None and "pan_seg_file_name" in dic: - with PathManager.open(dic["pan_seg_file_name"], "rb") as f: - pan_seg = Image.open(f) - pan_seg = np.asarray(pan_seg) - from panopticapi.utils import rgb2id - - pan_seg = rgb2id(pan_seg) - if pan_seg is not None: - segments_info = dic["segments_info"] - pan_seg = torch.tensor(pan_seg) - self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) - return self.output - - def overlay_instances( - self, - *, - boxes=None, - labels=None, - masks=None, - keypoints=None, - assigned_colors=None, - alpha=0.5, - ): - """ - Args: - boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, - or an Nx4 numpy array of XYXY_ABS format for the N 
objects in a single image, - or a :class:`RotatedBoxes`, - or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format - for the N objects in a single image, - labels (list[str]): the text to be displayed for each instance. - masks (masks-like object): Supported types are: - - * :class:`detectron2.structures.PolygonMasks`, - :class:`detectron2.structures.BitMasks`. - * list[list[ndarray]]: contains the segmentation masks for all objects in one image. - The first level of the list corresponds to individual instances. The second - level to all the polygon that compose the instance, and the third level - to the polygon coordinates. The third level should have the format of - [x0, y0, x1, y1, ..., xn, yn] (n >= 3). - * list[ndarray]: each ndarray is a binary mask of shape (H, W). - * list[dict]: each dict is a COCO-style RLE. - keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), - where the N is the number of instances and K is the number of keypoints. - The last dimension corresponds to (x, y, visibility or score). - assigned_colors (list[matplotlib.colors]): a list of colors, where each color - corresponds to each mask or box in the image. Refer to 'matplotlib.colors' - for full list of formats that the colors are accepted in. - Returns: - output (VisImage): image object with visualizations. - """ - num_instances = 0 - if boxes is not None: - boxes = self._convert_boxes(boxes) - num_instances = len(boxes) - if masks is not None: - masks = self._convert_masks(masks) - if num_instances: - assert len(masks) == num_instances - else: - num_instances = len(masks) - if keypoints is not None: - if num_instances: - assert len(keypoints) == num_instances - else: - num_instances = len(keypoints) - keypoints = self._convert_keypoints(keypoints) - if labels is not None: - assert len(labels) == num_instances - if assigned_colors is None: - assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] - if num_instances == 0: - return self.output - if boxes is not None and boxes.shape[1] == 5: - return self.overlay_rotated_instances( - boxes=boxes, labels=labels, assigned_colors=assigned_colors - ) - - # Display in largest to smallest order to reduce occlusion. - areas = None - if boxes is not None: - areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) - elif masks is not None: - areas = np.asarray([x.area() for x in masks]) - - if areas is not None: - sorted_idxs = np.argsort(-areas).tolist() - # Re-order overlapped instances in descending order. - boxes = boxes[sorted_idxs] if boxes is not None else None - labels = [labels[k] for k in sorted_idxs] if labels is not None else None - masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None - assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] - keypoints = keypoints[sorted_idxs] if keypoints is not None else None - - for i in range(num_instances): - color = assigned_colors[i] - if boxes is not None: - self.draw_box(boxes[i], edge_color=color) - - if masks is not None: - for segment in masks[i].polygons: - self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) - - if labels is not None: - # first get a box - if boxes is not None: - x0, y0, x1, y1 = boxes[i] - text_pos = (x0, y0) # if drawing boxes, put text on the box corner. 
- horiz_align = "left" - elif masks is not None: - # skip small mask without polygon - if len(masks[i].polygons) == 0: - continue - - x0, y0, x1, y1 = masks[i].bbox() - - # draw text in the center (defined by median) when box is not drawn - # median is less sensitive to outliers. - text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] - horiz_align = "center" - else: - continue # drawing the box confidence for keypoints isn't very useful. - # for small objects, draw text at the side to avoid occlusion - instance_area = (y1 - y0) * (x1 - x0) - if ( - instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale - or y1 - y0 < 40 * self.output.scale - ): - if y1 >= self.output.height - 5: - text_pos = (x1, y0) - else: - text_pos = (x0, y1) - - height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - font_size = ( - np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) - * 0.5 - * self._default_font_size - ) - self.draw_text( - labels[i], - text_pos, - color=lighter_color, - horizontal_alignment=horiz_align, - font_size=font_size, - ) - - # draw keypoints - if keypoints is not None: - for keypoints_per_instance in keypoints: - self.draw_and_connect_keypoints(keypoints_per_instance) - - return self.output - - def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): - """ - Args: - boxes (ndarray): an Nx5 numpy array of - (x_center, y_center, width, height, angle_degrees) format - for the N objects in a single image. - labels (list[str]): the text to be displayed for each instance. - assigned_colors (list[matplotlib.colors]): a list of colors, where each color - corresponds to each mask or box in the image. Refer to 'matplotlib.colors' - for full list of formats that the colors are accepted in. - - Returns: - output (VisImage): image object with visualizations. - """ - num_instances = len(boxes) - - if assigned_colors is None: - assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] - if num_instances == 0: - return self.output - - # Display in largest to smallest order to reduce occlusion. - if boxes is not None: - areas = boxes[:, 2] * boxes[:, 3] - - sorted_idxs = np.argsort(-areas).tolist() - # Re-order overlapped instances in descending order. - boxes = boxes[sorted_idxs] - labels = [labels[k] for k in sorted_idxs] if labels is not None else None - colors = [assigned_colors[idx] for idx in sorted_idxs] - - for i in range(num_instances): - self.draw_rotated_box_with_label( - boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None - ) - - return self.output - - def draw_and_connect_keypoints(self, keypoints): - """ - Draws keypoints of an instance and follows the rules for keypoint connections - to draw lines between appropriate keypoints. This follows color heuristics for - line color. - - Args: - keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints - and the last dimension corresponds to (x, y, probability). - - Returns: - output (VisImage): image object with visualizations. 
- """ - visible = {} - keypoint_names = self.metadata.get("keypoint_names") - for idx, keypoint in enumerate(keypoints): - - # draw keypoint - x, y, prob = keypoint - if prob > self.keypoint_threshold: - self.draw_circle((x, y), color=_RED) - if keypoint_names: - keypoint_name = keypoint_names[idx] - visible[keypoint_name] = (x, y) - - if self.metadata.get("keypoint_connection_rules"): - for kp0, kp1, color in self.metadata.keypoint_connection_rules: - if kp0 in visible and kp1 in visible: - x0, y0 = visible[kp0] - x1, y1 = visible[kp1] - color = tuple(x / 255.0 for x in color) - self.draw_line([x0, x1], [y0, y1], color=color) - - # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip - # Note that this strategy is specific to person keypoints. - # For other keypoints, it should just do nothing - try: - ls_x, ls_y = visible["left_shoulder"] - rs_x, rs_y = visible["right_shoulder"] - mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 - except KeyError: - pass - else: - # draw line from nose to mid-shoulder - nose_x, nose_y = visible.get("nose", (None, None)) - if nose_x is not None: - self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) - - try: - # draw line from mid-shoulder to mid-hip - lh_x, lh_y = visible["left_hip"] - rh_x, rh_y = visible["right_hip"] - except KeyError: - pass - else: - mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 - self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) - return self.output - - """ - Primitive drawing functions: - """ - - def draw_text( - self, - text, - position, - *, - font_size=None, - color="g", - horizontal_alignment="center", - rotation=0, - ): - """ - Args: - text (str): class label - position (tuple): a tuple of the x and y coordinates to place text on image. - font_size (int, optional): font of the text. If not provided, a font size - proportional to the image width is calculated and used. - color: color of the text. Refer to `matplotlib.colors` for full list - of formats that are accepted. - horizontal_alignment (str): see `matplotlib.text.Text` - rotation: rotation angle in degrees CCW - - Returns: - output (VisImage): image object with text drawn. - """ - if not font_size: - font_size = self._default_font_size - - # since the text background is dark, we don't want the text to be dark - color = np.maximum(list(mplc.to_rgb(color)), 0.2) - color[np.argmax(color)] = max(0.8, np.max(color)) - - x, y = position - self.output.ax.text( - x, - y, - text, - size=font_size * self.output.scale, - family="sans-serif", - bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, - verticalalignment="top", - horizontalalignment=horizontal_alignment, - color=color, - zorder=10, - rotation=rotation, - ) - return self.output - - def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): - """ - Args: - box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 - are the coordinates of the image's top left corner. x1 and y1 are the - coordinates of the image's bottom right corner. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - edge_color: color of the outline of the box. Refer to `matplotlib.colors` - for full list of formats that are accepted. - line_style (string): the string to use to create the outline of the boxes. - - Returns: - output (VisImage): image object with box drawn. 
- """ - x0, y0, x1, y1 = box_coord - width = x1 - x0 - height = y1 - y0 - - linewidth = max(self._default_font_size / 4, 1) - - self.output.ax.add_patch( - mpl.patches.Rectangle( - (x0, y0), - width, - height, - fill=False, - edgecolor=edge_color, - linewidth=linewidth * self.output.scale, - alpha=alpha, - linestyle=line_style, - ) - ) - return self.output - - def draw_rotated_box_with_label( - self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None - ): - """ - Draw a rotated box with label on its top-left corner. - - Args: - rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), - where cnt_x and cnt_y are the center coordinates of the box. - w and h are the width and height of the box. angle represents how - many degrees the box is rotated CCW with regard to the 0-degree box. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - edge_color: color of the outline of the box. Refer to `matplotlib.colors` - for full list of formats that are accepted. - line_style (string): the string to use to create the outline of the boxes. - label (string): label for rotated box. It will not be rendered when set to None. - - Returns: - output (VisImage): image object with box drawn. - """ - cnt_x, cnt_y, w, h, angle = rotated_box - area = w * h - # use thinner lines when the box is small - linewidth = self._default_font_size / ( - 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 - ) - - theta = angle * math.pi / 180.0 - c = math.cos(theta) - s = math.sin(theta) - rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] - # x: left->right ; y: top->down - rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] - for k in range(4): - j = (k + 1) % 4 - self.draw_line( - [rotated_rect[k][0], rotated_rect[j][0]], - [rotated_rect[k][1], rotated_rect[j][1]], - color=edge_color, - linestyle="--" if k == 1 else line_style, - linewidth=linewidth, - ) - - if label is not None: - text_pos = rotated_rect[1] # topleft corner - - height_ratio = h / np.sqrt(self.output.height * self.output.width) - label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) - font_size = ( - np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size - ) - self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) - - return self.output - - def draw_circle(self, circle_coord, color, radius=3): - """ - Args: - circle_coord (list(int) or tuple(int)): contains the x and y coordinates - of the center of the circle. - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - radius (int): radius of the circle. - - Returns: - output (VisImage): image object with box drawn. - """ - x, y = circle_coord - self.output.ax.add_patch( - mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) - ) - return self.output - - def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): - """ - Args: - x_data (list[int]): a list containing x values of all the points being drawn. - Length of list should match the length of y_data. - y_data (list[int]): a list containing y values of all the points being drawn. - Length of list should match the length of x_data. - color: color of the line. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - linestyle: style of the line. Refer to `matplotlib.lines.Line2D` - for a full list of formats that are accepted. 
- linewidth (float or None): width of the line. When it's None, - a default value will be computed and used. - - Returns: - output (VisImage): image object with line drawn. - """ - if linewidth is None: - linewidth = self._default_font_size / 3 - linewidth = max(linewidth, 1) - self.output.ax.add_line( - mpl.lines.Line2D( - x_data, - y_data, - linewidth=linewidth * self.output.scale, - color=color, - linestyle=linestyle, - ) - ) - return self.output - - def draw_binary_mask( - self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10 - ): - """ - Args: - binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and - W is the image width. Each value in the array is either a 0 or 1 value of uint8 - type. - color: color of the mask. Refer to `matplotlib.colors` for a full list of - formats that are accepted. If None, will pick a random color. - edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a - full list of formats that are accepted. - text (str): if None, will be drawn on the object - alpha (float): blending efficient. Smaller values lead to more transparent masks. - area_threshold (float): a connected component smaller than this area will not be shown. - - Returns: - output (VisImage): image object with mask drawn. - """ - if color is None: - color = random_color(rgb=True, maximum=1) - color = mplc.to_rgb(color) - - has_valid_segment = False - binary_mask = binary_mask.astype("uint8") # opencv needs uint8 - mask = GenericMask(binary_mask, self.output.height, self.output.width) - shape2d = (binary_mask.shape[0], binary_mask.shape[1]) - - if not mask.has_holes: - # draw polygons for regular masks - for segment in mask.polygons: - area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) - if area < (area_threshold or 0): - continue - has_valid_segment = True - segment = segment.reshape(-1, 2) - self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) - else: - # TODO: Use Path/PathPatch to draw vector graphics: - # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon - rgba = np.zeros(shape2d + (4,), dtype="float32") - rgba[:, :, :3] = color - rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha - has_valid_segment = True - self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) - - if text is not None and has_valid_segment: - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - self._draw_text_in_mask(binary_mask, text, lighter_color) - return self.output - - def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): - """ - Args: - soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. - color: color of the mask. Refer to `matplotlib.colors` for a full list of - formats that are accepted. If None, will pick a random color. - text (str): if None, will be drawn on the object - alpha (float): blending efficient. Smaller values lead to more transparent masks. - - Returns: - output (VisImage): image object with mask drawn. 
- """ - if color is None: - color = random_color(rgb=True, maximum=1) - color = mplc.to_rgb(color) - - shape2d = (soft_mask.shape[0], soft_mask.shape[1]) - rgba = np.zeros(shape2d + (4,), dtype="float32") - rgba[:, :, :3] = color - rgba[:, :, 3] = soft_mask * alpha - self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) - - if text is not None: - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - binary_mask = (soft_mask > 0.5).astype("uint8") - self._draw_text_in_mask(binary_mask, text, lighter_color) - return self.output - - def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): - """ - Args: - segment: numpy array of shape Nx2, containing all the points in the polygon. - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a - full list of formats that are accepted. If not provided, a darker shade - of the polygon color will be used instead. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - - Returns: - output (VisImage): image object with polygon drawn. - """ - if edge_color is None: - # make edge color darker than the polygon color - if alpha > 0.8: - edge_color = self._change_color_brightness(color, brightness_factor=-0.7) - else: - edge_color = color - edge_color = mplc.to_rgb(edge_color) + (1,) - - polygon = mpl.patches.Polygon( - segment, - fill=True, - facecolor=mplc.to_rgb(color) + (alpha,), - edgecolor=edge_color, - linewidth=max(self._default_font_size // 15 * self.output.scale, 1), - ) - self.output.ax.add_patch(polygon) - return self.output - - """ - Internal methods: - """ - - def _jitter(self, color): - """ - Randomly modifies given color to produce a slightly different color than the color given. - - Args: - color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color - picked. The values in the list are in the [0.0, 1.0] range. - - Returns: - jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the - color after being jittered. The values in the list are in the [0.0, 1.0] range. - """ - color = mplc.to_rgb(color) - vec = np.random.rand(3) - # better to do it in another color space - vec = vec / np.linalg.norm(vec) * 0.5 - res = np.clip(vec + color, 0, 1) - return tuple(res) - - def _create_grayscale_image(self, mask=None): - """ - Create a grayscale version of the original image. - The colors in masked area, if given, will be kept. - """ - img_bw = self.img.astype("f4").mean(axis=2) - img_bw = np.stack([img_bw] * 3, axis=2) - if mask is not None: - img_bw[mask] = self.img[mask] - return img_bw - - def _change_color_brightness(self, color, brightness_factor): - """ - Depending on the brightness_factor, gives a lighter or darker color i.e. a color with - less or more saturation than the original color. - - Args: - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of - 0 will correspond to no change, a factor in [-1.0, 0) range will result in - a darker color and a factor in (0, 1.0] range will result in a lighter color. - - Returns: - modified_color (tuple[double]): a tuple containing the RGB values of the - modified color. Each value in the tuple is in the [0.0, 1.0] range. 
- """ - assert brightness_factor >= -1.0 and brightness_factor <= 1.0 - color = mplc.to_rgb(color) - polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) - modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) - modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness - modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness - modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) - return modified_color - - def _convert_boxes(self, boxes): - """ - Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. - """ - if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): - return boxes.tensor.detach().numpy() - else: - return np.asarray(boxes) - - def _convert_masks(self, masks_or_polygons): - """ - Convert different format of masks or polygons to a tuple of masks and polygons. - - Returns: - list[GenericMask]: - """ - - m = masks_or_polygons - if isinstance(m, PolygonMasks): - m = m.polygons - if isinstance(m, BitMasks): - m = m.tensor.numpy() - if isinstance(m, torch.Tensor): - m = m.numpy() - ret = [] - for x in m: - if isinstance(x, GenericMask): - ret.append(x) - else: - ret.append(GenericMask(x, self.output.height, self.output.width)) - return ret - - def _draw_text_in_mask(self, binary_mask, text, color): - """ - Find proper places to draw text given a binary mask. - """ - # TODO sometimes drawn on wrong objects. the heuristics here can improve. - _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) - if stats[1:, -1].size == 0: - return - largest_component_id = np.argmax(stats[1:, -1]) + 1 - - # draw text on the largest component, as well as other very large components. - for cid in range(1, _num_cc): - if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: - # median is more stable than centroid - # center = centroids[largest_component_id] - center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] - self.draw_text(text, center, color=color) - - def _convert_keypoints(self, keypoints): - if isinstance(keypoints, Keypoints): - keypoints = keypoints.tensor - keypoints = np.asarray(keypoints) - return keypoints - - def get_output(self): - """ - Returns: - output (VisImage): the image output containing the visualizations added - to the image. 
- """ - return self.output diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/iou_loss.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/iou_loss.py deleted file mode 100644 index 6a02464651dc1a0dcec9f30285a3a4ef74209f89..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/layers/iou_loss.py +++ /dev/null @@ -1,121 +0,0 @@ -import torch -from torch import nn - - -class IOULoss(nn.Module): - def __init__(self, loc_loss_type='iou'): - super(IOULoss, self).__init__() - self.loc_loss_type = loc_loss_type - - def forward(self, pred, target, weight=None, reduction='sum'): - pred_left = pred[:, 0] - pred_top = pred[:, 1] - pred_right = pred[:, 2] - pred_bottom = pred[:, 3] - - target_left = target[:, 0] - target_top = target[:, 1] - target_right = target[:, 2] - target_bottom = target[:, 3] - - target_aera = (target_left + target_right) * \ - (target_top + target_bottom) - pred_aera = (pred_left + pred_right) * \ - (pred_top + pred_bottom) - - w_intersect = torch.min(pred_left, target_left) + \ - torch.min(pred_right, target_right) - h_intersect = torch.min(pred_bottom, target_bottom) + \ - torch.min(pred_top, target_top) - - g_w_intersect = torch.max(pred_left, target_left) + \ - torch.max(pred_right, target_right) - g_h_intersect = torch.max(pred_bottom, target_bottom) + \ - torch.max(pred_top, target_top) - ac_uion = g_w_intersect * g_h_intersect - - area_intersect = w_intersect * h_intersect - area_union = target_aera + pred_aera - area_intersect - - ious = (area_intersect + 1.0) / (area_union + 1.0) - gious = ious - (ac_uion - area_union) / ac_uion - if self.loc_loss_type == 'iou': - losses = -torch.log(ious) - elif self.loc_loss_type == 'linear_iou': - losses = 1 - ious - elif self.loc_loss_type == 'giou': - losses = 1 - gious - else: - raise NotImplementedError - - if weight is not None: - losses = losses * weight - else: - losses = losses - - if reduction == 'sum': - return losses.sum() - elif reduction == 'batch': - return losses.sum(dim=[1]) - elif reduction == 'none': - return losses - else: - raise NotImplementedError - - -def giou_loss( - boxes1: torch.Tensor, - boxes2: torch.Tensor, - reduction: str = "none", - eps: float = 1e-7, -) -> torch.Tensor: - """ - Generalized Intersection over Union Loss (Hamid Rezatofighi et. al) - https://arxiv.org/abs/1902.09630 - Gradient-friendly IoU loss with an additional penalty that is non-zero when the - boxes do not overlap and scales with the size of their smallest enclosing box. - This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable. - Args: - boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). - reduction: 'none' | 'mean' | 'sum' - 'none': No reduction will be applied to the output. - 'mean': The output will be averaged. - 'sum': The output will be summed. 
- eps (float): small number to prevent division by zero - """ - - x1, y1, x2, y2 = boxes1.unbind(dim=-1) - x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) - - assert (x2 >= x1).all(), "bad box: x1 larger than x2" - assert (y2 >= y1).all(), "bad box: y1 larger than y2" - - # Intersection keypoints - xkis1 = torch.max(x1, x1g) - ykis1 = torch.max(y1, y1g) - xkis2 = torch.min(x2, x2g) - ykis2 = torch.min(y2, y2g) - - intsctk = torch.zeros_like(x1) - mask = (ykis2 > ykis1) & (xkis2 > xkis1) - intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) - unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk - iouk = intsctk / (unionk + eps) - - # smallest enclosing box - xc1 = torch.min(x1, x1g) - yc1 = torch.min(y1, y1g) - xc2 = torch.max(x2, x2g) - yc2 = torch.max(y2, y2g) - - area_c = (xc2 - xc1) * (yc2 - yc1) - miouk = iouk - ((area_c - unionk) / (area_c + eps)) - - loss = 1 - miouk - - if reduction == "mean": - loss = loss.mean() - elif reduction == "sum": - loss = loss.sum() - - return loss \ No newline at end of file diff --git a/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_61968KB.py b/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_61968KB.py deleted file mode 100644 index becbfae85683a13bbb19d3ea6c840da24e61e01e..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/nets_61968KB.py +++ /dev/null @@ -1,122 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import layers_123821KB as layers - - -class BaseASPPNet(nn.Module): - def __init__(self, nin, ch, dilations=(4, 8, 16)): - super(BaseASPPNet, self).__init__() - self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) - self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) - self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) - self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) - - self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations) - - self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) - self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) - self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) - self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) - - def __call__(self, x): - h, e1 = self.enc1(x) - h, e2 = self.enc2(h) - h, e3 = self.enc3(h) - h, e4 = self.enc4(h) - - h = self.aspp(h) - - h = self.dec4(h, e4) - h = self.dec3(h, e3) - h = self.dec2(h, e2) - h = self.dec1(h, e1) - - return h - - -class CascadedASPPNet(nn.Module): - def __init__(self, n_fft): - super(CascadedASPPNet, self).__init__() - self.stg1_low_band_net = BaseASPPNet(2, 32) - self.stg1_high_band_net = BaseASPPNet(2, 32) - - self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0) - self.stg2_full_band_net = BaseASPPNet(16, 32) - - self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0) - self.stg3_full_band_net = BaseASPPNet(32, 64) - - self.out = nn.Conv2d(64, 2, 1, bias=False) - self.aux1_out = nn.Conv2d(32, 2, 1, bias=False) - self.aux2_out = nn.Conv2d(32, 2, 1, bias=False) - - self.max_bin = n_fft // 2 - self.output_bin = n_fft // 2 + 1 - - self.offset = 128 - - def forward(self, x, aggressiveness=None): - mix = x.detach() - x = x.clone() - - x = x[:, :, : self.max_bin] - - bandw = x.size()[2] // 2 - aux1 = torch.cat( - [ - self.stg1_low_band_net(x[:, :, :bandw]), - self.stg1_high_band_net(x[:, :, bandw:]), - ], - dim=2, - ) - - h = torch.cat([x, aux1], dim=1) - aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) - - h = torch.cat([x, aux1, aux2], dim=1) - h = self.stg3_full_band_net(self.stg3_bridge(h)) - - mask = torch.sigmoid(self.out(h)) - mask = F.pad( 
- input=mask, - pad=(0, 0, 0, self.output_bin - mask.size()[2]), - mode="replicate", - ) - - if self.training: - aux1 = torch.sigmoid(self.aux1_out(aux1)) - aux1 = F.pad( - input=aux1, - pad=(0, 0, 0, self.output_bin - aux1.size()[2]), - mode="replicate", - ) - aux2 = torch.sigmoid(self.aux2_out(aux2)) - aux2 = F.pad( - input=aux2, - pad=(0, 0, 0, self.output_bin - aux2.size()[2]), - mode="replicate", - ) - return mask * mix, aux1 * mix, aux2 * mix - else: - if aggressiveness: - mask[:, :, : aggressiveness["split_bin"]] = torch.pow( - mask[:, :, : aggressiveness["split_bin"]], - 1 + aggressiveness["value"] / 3, - ) - mask[:, :, aggressiveness["split_bin"] :] = torch.pow( - mask[:, :, aggressiveness["split_bin"] :], - 1 + aggressiveness["value"], - ) - - return mask * mix - - def predict(self, x_mag, aggressiveness=None): - h = self.forward(x_mag, aggressiveness) - - if self.offset > 0: - h = h[:, :, :, self.offset : -self.offset] - assert h.size()[3] > 0 - - return h diff --git a/spaces/Benson/text-generation/Examples/Base-1.apk.md b/spaces/Benson/text-generation/Examples/Base-1.apk.md deleted file mode 100644 index d1c9b418fe9f5c566f670498d4f6417b36d87e57..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Base-1.apk.md +++ /dev/null @@ -1,53 +0,0 @@ -
-

What is Base-1.apk and how do you use it?

-

If you are looking for a practical and convenient way to protect and back up the important data on your Android device, you may want to check out Base-1.apk. This is a popular Android application that lets you easily manage and restore all of your files in one place, reducing the chance of losing any valuable information. In this article, we explain what Base-1.apk is, how to download and install it, and how to use it effectively.

-

Introduction

-

Android is a versatile, open platform that lets you customize and modify your device to suit your preferences. However, this also means you need to be careful about the security and integrity of your data, since there are many threats and risks that can compromise or damage your files. That is why it is important to have a reliable backup solution that can help you protect your data and restore it in case of an emergency.

-

base-1.apk


DOWNLOAD > https://bltlly.com/2v6LA6



-

What is an APK file?

-

An APK file is the package file format that Android uses to distribute and install applications. It contains all the elements an app needs to run correctly on your device, such as code, resources, assets, certificates, and the manifest. An APK file is an archive file, which means it holds several files plus some metadata about them. You can open an APK file with an archive extraction tool such as 7-Zip to see what is inside.
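Since an APK is just a ZIP archive under the hood, you do not strictly need 7-Zip to look inside one. The short Python sketch below lists an APK's contents with the standard-library zipfile module; the filename base-1.apk is only a placeholder for whatever APK file you actually have.

```python
import zipfile

# Placeholder path: point this at the APK you downloaded.
apk_path = "base-1.apk"

with zipfile.ZipFile(apk_path) as apk:
    # Typical entries include AndroidManifest.xml, classes.dex, resources.arsc and META-INF/.
    for name in apk.namelist():
        print(name)
```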

-

What is Base-1.apk?

-

Base-1.apk is the original, unmodified version of an Android application called Base. Base is a tool that makes it simple and practical to protect and back up your important data. Using this app, you can easily manage and restore all of your files in one place, reducing the chance of losing any valuable information. You can also organize and sort your files into folders for better management.

-

How do you download and install Base-1.apk?

- -

Download from the official website

-

The safest and recommended way to download Base-1.apk is from the developer's official website. You can visit baseapk.in and click the download link to get the latest version of the app. Once you have downloaded the APK file, you need to enable installation of apps from unknown sources in your device settings. Then you can tap the APK file and follow the prompts to install it.
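If you prefer to fetch the file from a computer instead of tapping a link on the phone, a plain HTTP download is enough. The sketch below is only an illustration: the article does not give a direct file URL, so the address used here is a stand-in that you would replace with the real download link from baseapk.in.

```python
import urllib.request

# Stand-in URL: substitute the actual download link from the official site.
apk_url = "https://example.com/base-1.apk"

urllib.request.urlretrieve(apk_url, "base-1.apk")
print("saved base-1.apk")
```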

-

Download from a third-party app store

-

Another way to download Base-1.apk is from a third-party app store such as Aptoide or APKPure. These are alternative platforms that offer a variety of apps that are not available on Google Play. However, you should be careful when downloading apps from these sources, as they may contain malware or other harmful software. Always check the reviews and ratings of an app before downloading it, and only download from trusted sources.

-

Download from a direct link

-

The last way to download Base-1.apk is from a direct link that someone provides to you. This could be a friend, a colleague, or a website that offers APK downloads. However, this is the riskiest method, since you have no way to verify the authenticity or safety of the APK file. You should only download APK files from direct links if you trust the source completely, and scan the file with an antivirus app before installing it.
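The article recommends scanning a file obtained from a direct link with an antivirus app. A complementary check you can script yourself, assuming the developer publishes a checksum for the release, is to compare the file's SHA-256 hash against that published value, as in the sketch below (the expected hash shown here is a placeholder).

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Hash the file in chunks so large APKs do not need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder value: replace with the checksum published by the developer, if any.
expected = "0" * 64
actual = sha256_of("base-1.apk")
print("checksum matches" if actual == expected else "checksum mismatch - do not install")
```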

-

How do you use Base-1.apk?

-


Once you have installed Base-1.apk on your device, you can start using it to protect and back up your data. These are some of the app's main features and functions:

-

Securely back up your data

- -

Manage and restore your files

-

Base-1.apk also lets you manage and restore your files from the cloud storage service. You can view, edit, delete, or share your files from the app's interface, and restore them to your device or to another device in case of an emergency. You can select which files and folders to restore and choose the destination folder on your device, restoring files either to their original location or to a new one.

-

-

Organize and sort your folders

-

Another useful feature of Base-1.apk is that it helps you organize and sort your folders into different categories, such as photos, videos, music, documents, and so on. You can also create custom folders and labels for your files. This way, you can easily find and access your files without wasting time or space, and you can sort them by name, date, size, or type.
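The app does this grouping for you, but the idea itself is easy to reproduce. The sketch below is not Base's own code; it just illustrates the same kind of categorization, moving files in a folder into subfolders by extension (the category map and the folder name are assumptions chosen for illustration).

```python
import shutil
from pathlib import Path

# Illustrative category map; extend it to taste.
CATEGORIES = {
    ".jpg": "photos", ".png": "photos",
    ".mp4": "videos", ".mp3": "music",
    ".pdf": "documents", ".txt": "documents",
}

def organize(folder):
    folder = Path(folder)
    # Snapshot the listing first so newly created subfolders are not re-visited.
    for f in list(folder.iterdir()):
        if f.is_file():
            category = CATEGORIES.get(f.suffix.lower(), "other")
            target = folder / category
            target.mkdir(exist_ok=True)
            shutil.move(str(f), str(target / f.name))

organize("backup")  # placeholder folder name
```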

-

Conclusion

-

Base-1.apk is a powerful and practical Android application that helps you protect and back up the important data on your device. Using this app, you can easily manage and restore all of your files in one place, reducing the chance of losing any valuable information, and you can organize and sort your files into folders for better management.

-

Summary of the main points

-

In this article, we have explained what Base-1.apk is, how to download and install it, and how to use it effectively. We have covered the following points:

-
  • An APK file is the package file format that Android uses to distribute and install applications.
  • Base-1.apk is the original, unmodified version of an Android application called Base.
  • Base is a tool that makes it simple and practical to protect and back up your important data.
  • You can download Base-1.apk from the official website, a third-party app store, or a direct link.
-

Call to action

-

If you are interested in trying Base-1.apk for yourself, you can download it from baseapk.in and follow the installation instructions. You can also check the FAQ section below for more information about the app. We hope you enjoy using Base-1.apk and find it useful for protecting and backing up your data.

-

Frequently asked questions

-

Here are some of the most common questions users have about Base-1.apk:

-

Q: Is Base-1.apk safe to use?

-

A: Yes, Base-1.apk is safe to use as long as you download it from the official website or a trusted source. However, you should always scan any APK file with an antivirus app before installing it on your device.

-

Q: How much space does Base-1.apk take up on my device?

-

A: Base-1.apk takes up about 15 MB of space on your device. However, the actual size may vary depending on the app version and your device model.

-

Q: How much cloud storage space does Base-1.apk offer?

-

A: Base-1.apk does not offer any cloud storage space by itself. It uses the cloud storage service you choose for backing up your data, such as Google Drive, Dropbox, or OneDrive. The amount of cloud storage you get depends on the service provider and the plan you have.

-

Q: Can I use Base-1.apk on multiple devices?

-


A: Yes, you can use Base-1.apk on multiple devices as long as they run Android 4.0 or higher. You just need to download and install the app on each device and sign in with the same account. You can then access and restore your files from any device.

-

Q: What happens if I forget my password for Base-1.apk?

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_py.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_py.py deleted file mode 100644 index 47c6158e0f74033bfcfeb7424df227a3815651de..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_py.py +++ /dev/null @@ -1,407 +0,0 @@ -"""distutils.command.build_py - -Implements the Distutils 'build_py' command.""" - -import os -import importlib.util -import sys -import glob - -from distutils.core import Command -from distutils.errors import DistutilsOptionError, DistutilsFileError -from distutils.util import convert_path -from distutils import log - - -class build_py(Command): - - description = "\"build\" pure Python modules (copy to build directory)" - - user_options = [ - ('build-lib=', 'd', "directory to \"build\" (copy) to"), - ('compile', 'c', "compile .py to .pyc"), - ('no-compile', None, "don't compile .py files [default]"), - ( - 'optimize=', - 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]", - ), - ('force', 'f', "forcibly build everything (ignore file timestamps)"), - ] - - boolean_options = ['compile', 'force'] - negative_opt = {'no-compile': 'compile'} - - def initialize_options(self): - self.build_lib = None - self.py_modules = None - self.package = None - self.package_data = None - self.package_dir = None - self.compile = 0 - self.optimize = 0 - self.force = None - - def finalize_options(self): - self.set_undefined_options( - 'build', ('build_lib', 'build_lib'), ('force', 'force') - ) - - # Get the distribution options that are aliases for build_py - # options -- list of packages and list of modules. - self.packages = self.distribution.packages - self.py_modules = self.distribution.py_modules - self.package_data = self.distribution.package_data - self.package_dir = {} - if self.distribution.package_dir: - for name, path in self.distribution.package_dir.items(): - self.package_dir[name] = convert_path(path) - self.data_files = self.get_data_files() - - # Ick, copied straight from install_lib.py (fancy_getopt needs a - # type system! Hell, *everything* needs a type system!!!) - if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - assert 0 <= self.optimize <= 2 - except (ValueError, AssertionError): - raise DistutilsOptionError("optimize must be 0, 1, or 2") - - def run(self): - # XXX copy_file by default preserves atime and mtime. IMHO this is - # the right thing to do, but perhaps it should be an option -- in - # particular, a site administrator might want installed files to - # reflect the time of installation rather than the last - # modification time before the installed release. - - # XXX copy_file by default preserves mode, which appears to be the - # wrong thing to do: if a file is read-only in the working - # directory, we want it to be installed read/write so that the next - # installation of the same module distribution can overwrite it - # without problems. (This might be a Unix-specific issue.) Thus - # we turn off 'preserve_mode' when copying to the build directory, - # since the build directory is supposed to be exactly what the - # installation will look like (ie. we preserve mode when - # installing). - - # Two options control which modules will be installed: 'packages' - # and 'py_modules'. 
The former lets us work with whole packages, not - # specifying individual modules at all; the latter is for - # specifying modules one-at-a-time. - - if self.py_modules: - self.build_modules() - if self.packages: - self.build_packages() - self.build_package_data() - - self.byte_compile(self.get_outputs(include_bytecode=0)) - - def get_data_files(self): - """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" - data = [] - if not self.packages: - return data - for package in self.packages: - # Locate package source directory - src_dir = self.get_package_dir(package) - - # Compute package build directory - build_dir = os.path.join(*([self.build_lib] + package.split('.'))) - - # Length of path to strip from found files - plen = 0 - if src_dir: - plen = len(src_dir) + 1 - - # Strip directory from globbed filenames - filenames = [file[plen:] for file in self.find_data_files(package, src_dir)] - data.append((package, src_dir, build_dir, filenames)) - return data - - def find_data_files(self, package, src_dir): - """Return filenames for package's data files in 'src_dir'""" - globs = self.package_data.get('', []) + self.package_data.get(package, []) - files = [] - for pattern in globs: - # Each pattern has to be converted to a platform-specific path - filelist = glob.glob( - os.path.join(glob.escape(src_dir), convert_path(pattern)) - ) - # Files that match more than one pattern are only added once - files.extend( - [fn for fn in filelist if fn not in files and os.path.isfile(fn)] - ) - return files - - def build_package_data(self): - """Copy data files into build directory""" - for package, src_dir, build_dir, filenames in self.data_files: - for filename in filenames: - target = os.path.join(build_dir, filename) - self.mkpath(os.path.dirname(target)) - self.copy_file( - os.path.join(src_dir, filename), target, preserve_mode=False - ) - - def get_package_dir(self, package): - """Return the directory, relative to the top of the source - distribution, where package 'package' should be found - (at least according to the 'package_dir' option, if any).""" - path = package.split('.') - - if not self.package_dir: - if path: - return os.path.join(*path) - else: - return '' - else: - tail = [] - while path: - try: - pdir = self.package_dir['.'.join(path)] - except KeyError: - tail.insert(0, path[-1]) - del path[-1] - else: - tail.insert(0, pdir) - return os.path.join(*tail) - else: - # Oops, got all the way through 'path' without finding a - # match in package_dir. If package_dir defines a directory - # for the root (nameless) package, then fallback on it; - # otherwise, we might as well have not consulted - # package_dir at all, as we just use the directory implied - # by 'tail' (which should be the same as the original value - # of 'path' at this point). - pdir = self.package_dir.get('') - if pdir is not None: - tail.insert(0, pdir) - - if tail: - return os.path.join(*tail) - else: - return '' - - def check_package(self, package, package_dir): - # Empty dir name means current directory, which we can probably - # assume exists. Also, os.path.exists and isdir don't know about - # my "empty string means current dir" convention, so we have to - # circumvent them. 
- if package_dir != "": - if not os.path.exists(package_dir): - raise DistutilsFileError( - "package directory '%s' does not exist" % package_dir - ) - if not os.path.isdir(package_dir): - raise DistutilsFileError( - "supposed package directory '%s' exists, " - "but is not a directory" % package_dir - ) - - # Directories without __init__.py are namespace packages (PEP 420). - if package: - init_py = os.path.join(package_dir, "__init__.py") - if os.path.isfile(init_py): - return init_py - - # Either not in a package at all (__init__.py not expected), or - # __init__.py doesn't exist -- so don't return the filename. - return None - - def check_module(self, module, module_file): - if not os.path.isfile(module_file): - log.warn("file %s (for module %s) not found", module_file, module) - return False - else: - return True - - def find_package_modules(self, package, package_dir): - self.check_package(package, package_dir) - module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py")) - modules = [] - setup_script = os.path.abspath(self.distribution.script_name) - - for f in module_files: - abs_f = os.path.abspath(f) - if abs_f != setup_script: - module = os.path.splitext(os.path.basename(f))[0] - modules.append((package, module, f)) - else: - self.debug_print("excluding %s" % setup_script) - return modules - - def find_modules(self): - """Finds individually-specified Python modules, ie. those listed by - module name in 'self.py_modules'. Returns a list of tuples (package, - module_base, filename): 'package' is a tuple of the path through - package-space to the module; 'module_base' is the bare (no - packages, no dots) module name, and 'filename' is the path to the - ".py" file (relative to the distribution root) that implements the - module. - """ - # Map package names to tuples of useful info about the package: - # (package_dir, checked) - # package_dir - the directory where we'll find source files for - # this package - # checked - true if we have checked that the package directory - # is valid (exists, contains __init__.py, ... ?) - packages = {} - - # List of (package, module, filename) tuples to return - modules = [] - - # We treat modules-in-packages almost the same as toplevel modules, - # just the "package" for a toplevel is empty (either an empty - # string or empty list, depending on context). Differences: - # - don't check for __init__.py in directory for empty package - for module in self.py_modules: - path = module.split('.') - package = '.'.join(path[0:-1]) - module_base = path[-1] - - try: - (package_dir, checked) = packages[package] - except KeyError: - package_dir = self.get_package_dir(package) - checked = 0 - - if not checked: - init_py = self.check_package(package, package_dir) - packages[package] = (package_dir, 1) - if init_py: - modules.append((package, "__init__", init_py)) - - # XXX perhaps we should also check for just .pyc files - # (so greedy closed-source bastards can distribute Python - # modules too) - module_file = os.path.join(package_dir, module_base + ".py") - if not self.check_module(module, module_file): - continue - - modules.append((package, module_base, module_file)) - - return modules - - def find_all_modules(self): - """Compute the list of all modules that will be built, whether - they are specified one-module-at-a-time ('self.py_modules') or - by whole packages ('self.packages'). 
Return a list of tuples - (package, module, module_file), just like 'find_modules()' and - 'find_package_modules()' do.""" - modules = [] - if self.py_modules: - modules.extend(self.find_modules()) - if self.packages: - for package in self.packages: - package_dir = self.get_package_dir(package) - m = self.find_package_modules(package, package_dir) - modules.extend(m) - return modules - - def get_source_files(self): - return [module[-1] for module in self.find_all_modules()] - - def get_module_outfile(self, build_dir, package, module): - outfile_path = [build_dir] + list(package) + [module + ".py"] - return os.path.join(*outfile_path) - - def get_outputs(self, include_bytecode=1): - modules = self.find_all_modules() - outputs = [] - for (package, module, module_file) in modules: - package = package.split('.') - filename = self.get_module_outfile(self.build_lib, package, module) - outputs.append(filename) - if include_bytecode: - if self.compile: - outputs.append( - importlib.util.cache_from_source(filename, optimization='') - ) - if self.optimize > 0: - outputs.append( - importlib.util.cache_from_source( - filename, optimization=self.optimize - ) - ) - - outputs += [ - os.path.join(build_dir, filename) - for package, src_dir, build_dir, filenames in self.data_files - for filename in filenames - ] - - return outputs - - def build_module(self, module, module_file, package): - if isinstance(package, str): - package = package.split('.') - elif not isinstance(package, (list, tuple)): - raise TypeError( - "'package' must be a string (dot-separated), list, or tuple" - ) - - # Now put the module source file into the "build" area -- this is - # easy, we just copy it somewhere under self.build_lib (the build - # directory for Python source). - outfile = self.get_module_outfile(self.build_lib, package, module) - dir = os.path.dirname(outfile) - self.mkpath(dir) - return self.copy_file(module_file, outfile, preserve_mode=0) - - def build_modules(self): - modules = self.find_modules() - for (package, module, module_file) in modules: - # Now "build" the module -- ie. copy the source file to - # self.build_lib (the build directory for Python source). - # (Actually, it gets copied to the directory for this package - # under self.build_lib.) - self.build_module(module, module_file, package) - - def build_packages(self): - for package in self.packages: - # Get list of (package, module, module_file) tuples based on - # scanning the package directory. 'package' is only included - # in the tuple so that 'find_modules()' and - # 'find_package_tuples()' have a consistent interface; it's - # ignored here (apart from a sanity check). Also, 'module' is - # the *unqualified* module name (ie. no dots, no package -- we - # already know its package!), and 'module_file' is the path to - # the .py file, relative to the current directory - # (ie. including 'package_dir'). - package_dir = self.get_package_dir(package) - modules = self.find_package_modules(package, package_dir) - - # Now loop over the modules we found, "building" each one (just - # copy it to self.build_lib). 
- for (package_, module, module_file) in modules: - assert package == package_ - self.build_module(module, module_file, package) - - def byte_compile(self, files): - if sys.dont_write_bytecode: - self.warn('byte-compiling is disabled, skipping.') - return - - from distutils.util import byte_compile - - prefix = self.build_lib - if prefix[-1] != os.sep: - prefix = prefix + os.sep - - # XXX this code is essentially the same as the 'byte_compile() - # method of the "install_lib" command, except for the determination - # of the 'prefix' string. Hmmm. - if self.compile: - byte_compile( - files, optimize=0, force=self.force, prefix=prefix, dry_run=self.dry_run - ) - if self.optimize > 0: - byte_compile( - files, - optimize=self.optimize, - force=self.force, - prefix=prefix, - dry_run=self.dry_run, - ) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/queue.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/queue.py deleted file mode 100644 index 41784104ee4bd5796006d1052536325d52db1e8c..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/queue.py +++ /dev/null @@ -1,22 +0,0 @@ -import collections - -from ..packages import six -from ..packages.six.moves import queue - -if six.PY2: - # Queue is imported for side effects on MS Windows. See issue #229. - import Queue as _unused_module_Queue # noqa: F401 - - -class LifoQueue(queue.Queue): - def _init(self, _): - self.queue = collections.deque() - - def _qsize(self, len=len): - return len(self.queue) - - def _put(self, item): - self.queue.append(item) - - def _get(self): - return self.queue.pop() diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h deleted file mode 100644 index dfc17b68e595c84a191c6979751cf11af6d879fd..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -#pragma once -#include - -namespace detectron2 { - -at::Tensor ROIAlignRotated_forward_cpu( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio); - -at::Tensor ROIAlignRotated_backward_cpu( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio); - -#ifdef WITH_CUDA -at::Tensor ROIAlignRotated_forward_cuda( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio); - -at::Tensor ROIAlignRotated_backward_cuda( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio); -#endif - -// Interface for Python -inline at::Tensor ROIAlignRotated_forward( - const at::Tensor& input, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int sampling_ratio) { - if (input.type().is_cuda()) { -#ifdef WITH_CUDA - return ROIAlignRotated_forward_cuda( - input, - rois, - spatial_scale, - pooled_height, - pooled_width, - sampling_ratio); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - return ROIAlignRotated_forward_cpu( - input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); -} - -inline at::Tensor ROIAlignRotated_backward( - const at::Tensor& grad, - const at::Tensor& rois, - const float spatial_scale, - const int pooled_height, - const int pooled_width, - const int batch_size, - const int channels, - const int height, - const int width, - const int sampling_ratio) { - if (grad.type().is_cuda()) { -#ifdef WITH_CUDA - return ROIAlignRotated_backward_cuda( - grad, - rois, - spatial_scale, - pooled_height, - pooled_width, - batch_size, - channels, - height, - width, - sampling_ratio); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - return ROIAlignRotated_backward_cpu( - grad, - rois, - spatial_scale, - pooled_height, - pooled_width, - batch_size, - channels, - height, - width, - sampling_ratio); -} - -} // namespace detectron2 diff --git a/spaces/CVPR/LIVE/thrust/thrust/device_new.h b/spaces/CVPR/LIVE/thrust/thrust/device_new.h deleted file mode 100644 index 1ae4ce5a40d03b88073dd029d9a7049dcdab6783..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/device_new.h +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! 
\file device_new.h - * \brief Constructs new elements in device memory - */ - -#pragma once - -#include - -// #include this for size_t -#include -#include - -namespace thrust -{ - -/*! - * \addtogroup allocation_functions Allocation Functions - * \{ - */ - -/*! \p device_new implements the placement \c new operator for types - * resident in device memory. \p device_new calls T's null - * constructor on a array of objects in device memory. - * No memory is allocated by this function. - * - * \param p A \p device_ptr to a region of device memory into which - * to construct one or many Ts. - * \param n The number of objects to construct at \p p. - * \return p, casted to T's type. - * - * \see device_ptr - */ -template - device_ptr device_new(device_ptr p, - const size_t n = 1); - -/*! \p device_new implements the placement new operator for types - * resident in device memory. \p device_new calls T's copy - * constructor on a array of objects in device memory. No memory is - * allocated by this function. - * - * \param p A \p device_ptr to a region of device memory into which to - * construct one or many Ts. - * \param exemplar The value from which to copy. - * \param n The number of objects to construct at \p p. - * \return p, casted to T's type. - * - * \see device_ptr - * \see fill - */ -template - device_ptr device_new(device_ptr p, - const T &exemplar, - const size_t n = 1); - -/*! \p device_new implements the new operator for types resident in device memory. - * It allocates device memory large enough to hold \p n new objects of type \c T. - * - * \param n The number of objects to allocate. Defaults to \c 1. - * \return A \p device_ptr to the newly allocated region of device memory. - */ -template - device_ptr device_new(const size_t n = 1); - -/*! \} - */ - -} // end thrust - -#include - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par_to_seq.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par_to_seq.h deleted file mode 100644 index 22c4e58386e8c6dd0832bf3820072fadc53d34e8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/par_to_seq.h +++ /dev/null @@ -1,91 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ -#pragma once - -#include -#include - -namespace thrust -{ -namespace cuda_cub { - -template -struct has_par : thrust::detail::true_type {}; - -template <> -struct has_par<0> : thrust::detail::false_type {}; - -template -struct cvt_to_seq_impl -{ - typedef thrust::detail::seq_t seq_t; - - static seq_t __host__ __device__ - doit(Policy&) - { - return seq_t(); - } -}; // cvt_to_seq_impl - -#if 0 -template -struct cvt_to_seq_impl< - thrust::detail::execute_with_allocator > -{ - typedef thrust::detail::execute_with_allocator - Policy; - typedef thrust::detail::execute_with_allocator< - Allocator, - thrust::system::detail::sequential::execution_policy> - seq_t; - - - static seq_t __host__ __device__ - doit(Policy& policy) - { - return seq_t(policy.m_alloc); - } -}; // specialization of struct cvt_to_seq_impl -#endif - -template -typename cvt_to_seq_impl::seq_t __host__ __device__ -cvt_to_seq(Policy& policy) -{ - return cvt_to_seq_impl::doit(policy); -} - -#if __THRUST_HAS_CUDART__ -#define THRUST_CUDART_DISPATCH par -#else -#define THRUST_CUDART_DISPATCH seq -#endif - -} // namespace cuda_ -} // end namespace thrust diff --git a/spaces/CVPR/LIVE/thrust/thrust/uninitialized_fill.h b/spaces/CVPR/LIVE/thrust/thrust/uninitialized_fill.h deleted file mode 100644 index 33dc24886c30586a1302ec31f51a9f2dfca9b051..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/uninitialized_fill.h +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file uninitialized_fill.h - * \brief Copy construction into a range of uninitialized elements from a source value - */ - -#pragma once - -#include -#include - -namespace thrust -{ - - -/*! \addtogroup filling - * \ingroup transformations - * \{ - */ - - -/*! In \c thrust, the function \c thrust::device_new allocates memory for - * an object and then creates an object at that location by calling a - * constructor. Occasionally, however, it is useful to separate those two - * operations. If each iterator in the range [first, last) points - * to uninitialized memory, then \p uninitialized_fill creates copies of \c x - * in that range. That is, for each iterator \c i in the range [first, last), - * \p uninitialized_fill creates a copy of \c x in the location pointed to \c i by - * calling \p ForwardIterator's \c value_type's copy constructor. 
- * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The first element of the range of interest. - * \param last The last element of the range of interest. - * \param x The value to use as the exemplar of the copy constructor. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam ForwardIterator is a model of Forward Iterator, - * \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that - * takes a single argument of type \p T. - * - * The following code snippet demonstrates how to use \p uninitialized_fill to initialize a range of - * uninitialized memory using the \p thrust::device execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * - * struct Int - * { - * __host__ __device__ - * Int(int x) : val(x) {} - * int val; - * }; - * ... - * const int N = 137; - * - * Int val(46); - * thrust::device_ptr array = thrust::device_malloc(N); - * thrust::uninitialized_fill(thrust::device, array, array + N, val); - * - * // Int x = array[i]; - * // x.val == 46 for all 0 <= i < N - * \endcode - * - * \see http://www.sgi.com/tech/stl/uninitialized_fill.html - * \see \c uninitialized_fill_n - * \see \c fill - * \see \c uninitialized_copy - * \see \c device_new - * \see \c device_malloc - */ -template -__host__ __device__ - void uninitialized_fill(const thrust::detail::execution_policy_base &exec, - ForwardIterator first, - ForwardIterator last, - const T &x); - - -/*! In \c thrust, the function \c thrust::device_new allocates memory for - * an object and then creates an object at that location by calling a - * constructor. Occasionally, however, it is useful to separate those two - * operations. If each iterator in the range [first, last) points - * to uninitialized memory, then \p uninitialized_fill creates copies of \c x - * in that range. That is, for each iterator \c i in the range [first, last), - * \p uninitialized_fill creates a copy of \c x in the location pointed to \c i by - * calling \p ForwardIterator's \c value_type's copy constructor. - * - * \param first The first element of the range of interest. - * \param last The last element of the range of interest. - * \param x The value to use as the exemplar of the copy constructor. - * - * \tparam ForwardIterator is a model of Forward Iterator, - * \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that - * takes a single argument of type \p T. - * - * The following code snippet demonstrates how to use \p uninitialized_fill to initialize a range of - * uninitialized memory. - * - * \code - * #include - * #include - * - * struct Int - * { - * __host__ __device__ - * Int(int x) : val(x) {} - * int val; - * }; - * ... - * const int N = 137; - * - * Int val(46); - * thrust::device_ptr array = thrust::device_malloc(N); - * thrust::uninitialized_fill(array, array + N, val); - * - * // Int x = array[i]; - * // x.val == 46 for all 0 <= i < N - * \endcode - * - * \see http://www.sgi.com/tech/stl/uninitialized_fill.html - * \see \c uninitialized_fill_n - * \see \c fill - * \see \c uninitialized_copy - * \see \c device_new - * \see \c device_malloc - */ -template - void uninitialized_fill(ForwardIterator first, - ForwardIterator last, - const T &x); - - -/*! 
In \c thrust, the function \c thrust::device_new allocates memory for - * an object and then creates an object at that location by calling a - * constructor. Occasionally, however, it is useful to separate those two - * operations. If each iterator in the range [first, first+n) points - * to uninitialized memory, then \p uninitialized_fill creates copies of \c x - * in that range. That is, for each iterator \c i in the range [first, first+n), - * \p uninitialized_fill creates a copy of \c x in the location pointed to \c i by - * calling \p ForwardIterator's \c value_type's copy constructor. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The first element of the range of interest. - * \param n The size of the range of interest. - * \param x The value to use as the exemplar of the copy constructor. - * \return first+n - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam ForwardIterator is a model of Forward Iterator, - * \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that - * takes a single argument of type \p T. - * - * The following code snippet demonstrates how to use \p uninitialized_fill to initialize a range of - * uninitialized memory using the \p thrust::device execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * - * struct Int - * { - * __host__ __device__ - * Int(int x) : val(x) {} - * int val; - * }; - * ... - * const int N = 137; - * - * Int val(46); - * thrust::device_ptr array = thrust::device_malloc(N); - * thrust::uninitialized_fill_n(thrust::device, array, N, val); - * - * // Int x = array[i]; - * // x.val == 46 for all 0 <= i < N - * \endcode - * - * \see http://www.sgi.com/tech/stl/uninitialized_fill.html - * \see \c uninitialized_fill - * \see \c fill - * \see \c uninitialized_copy_n - * \see \c device_new - * \see \c device_malloc - */ -template -__host__ __device__ - ForwardIterator uninitialized_fill_n(const thrust::detail::execution_policy_base &exec, - ForwardIterator first, - Size n, - const T &x); - - -/*! In \c thrust, the function \c thrust::device_new allocates memory for - * an object and then creates an object at that location by calling a - * constructor. Occasionally, however, it is useful to separate those two - * operations. If each iterator in the range [first, first+n) points - * to uninitialized memory, then \p uninitialized_fill creates copies of \c x - * in that range. That is, for each iterator \c i in the range [first, first+n), - * \p uninitialized_fill creates a copy of \c x in the location pointed to \c i by - * calling \p ForwardIterator's \c value_type's copy constructor. - * - * \param first The first element of the range of interest. - * \param n The size of the range of interest. - * \param x The value to use as the exemplar of the copy constructor. - * \return first+n - * - * \tparam ForwardIterator is a model of Forward Iterator, - * \p ForwardIterator is mutable, and \p ForwardIterator's \c value_type has a constructor that - * takes a single argument of type \p T. - * - * The following code snippet demonstrates how to use \p uninitialized_fill to initialize a range of - * uninitialized memory. - * - * \code - * #include - * #include - * - * struct Int - * { - * __host__ __device__ - * Int(int x) : val(x) {} - * int val; - * }; - * ... 
- * const int N = 137; - * - * Int val(46); - * thrust::device_ptr array = thrust::device_malloc(N); - * thrust::uninitialized_fill_n(array, N, val); - * - * // Int x = array[i]; - * // x.val == 46 for all 0 <= i < N - * \endcode - * - * \see http://www.sgi.com/tech/stl/uninitialized_fill.html - * \see \c uninitialized_fill - * \see \c fill - * \see \c uninitialized_copy_n - * \see \c device_new - * \see \c device_malloc - */ -template - ForwardIterator uninitialized_fill_n(ForwardIterator first, - Size n, - const T &x); - -/*! \} // end filling - * \} // transformations - */ - -} // end thrust - -#include - diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py b/spaces/CVPR/WALT/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py deleted file mode 100644 index c735298487e14e4a0ec42913f25673cccb98a8a0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py +++ /dev/null @@ -1,55 +0,0 @@ -import numpy as np -import torch - -from ..builder import BBOX_SAMPLERS -from .random_sampler import RandomSampler - - -@BBOX_SAMPLERS.register_module() -class InstanceBalancedPosSampler(RandomSampler): - """Instance balanced sampler that samples equal number of positive samples - for each instance.""" - - def _sample_pos(self, assign_result, num_expected, **kwargs): - """Sample positive boxes. - - Args: - assign_result (:obj:`AssignResult`): The assigned results of boxes. - num_expected (int): The number of expected positive samples - - Returns: - Tensor or ndarray: sampled indices. - """ - pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) - if pos_inds.numel() != 0: - pos_inds = pos_inds.squeeze(1) - if pos_inds.numel() <= num_expected: - return pos_inds - else: - unique_gt_inds = assign_result.gt_inds[pos_inds].unique() - num_gts = len(unique_gt_inds) - num_per_gt = int(round(num_expected / float(num_gts)) + 1) - sampled_inds = [] - for i in unique_gt_inds: - inds = torch.nonzero( - assign_result.gt_inds == i.item(), as_tuple=False) - if inds.numel() != 0: - inds = inds.squeeze(1) - else: - continue - if len(inds) > num_per_gt: - inds = self.random_choice(inds, num_per_gt) - sampled_inds.append(inds) - sampled_inds = torch.cat(sampled_inds) - if len(sampled_inds) < num_expected: - num_extra = num_expected - len(sampled_inds) - extra_inds = np.array( - list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) - if len(extra_inds) > num_extra: - extra_inds = self.random_choice(extra_inds, num_extra) - extra_inds = torch.from_numpy(extra_inds).to( - assign_result.gt_inds.device).long() - sampled_inds = torch.cat([sampled_inds, extra_inds]) - elif len(sampled_inds) > num_expected: - sampled_inds = self.random_choice(sampled_inds, num_expected) - return sampled_inds diff --git a/spaces/CVPR/WALT/train.py b/spaces/CVPR/WALT/train.py deleted file mode 100644 index f0f11191f08da30857ae17d6c498c746b1d184f5..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/train.py +++ /dev/null @@ -1,191 +0,0 @@ -import argparse -import copy -import os -import os.path as osp -import time -import warnings - -import mmcv -import torch -from mmcv import Config, DictAction -from mmcv.runner import get_dist_info, init_dist -from mmcv.utils import get_git_hash - -from mmdet import __version__ -from mmdet.apis import set_random_seed -from mmdet.models import build_detector -from mmdet.utils import collect_env, get_root_logger -from walt.apis import train_detector -from walt.datasets import build_dataset 
- - -def parse_args(): - parser = argparse.ArgumentParser(description='Train a detector') - parser.add_argument('config', help='train config file path') - parser.add_argument('--work-dir', help='the dir to save logs and models') - parser.add_argument( - '--resume-from', help='the checkpoint file to resume from') - parser.add_argument( - '--no-validate', - action='store_true', - help='whether not to evaluate the checkpoint during training') - group_gpus = parser.add_mutually_exclusive_group() - group_gpus.add_argument( - '--gpus', - type=int, - help='number of gpus to use ' - '(only applicable to non-distributed training)') - group_gpus.add_argument( - '--gpu-ids', - type=int, - nargs='+', - help='ids of gpus to use ' - '(only applicable to non-distributed training)') - parser.add_argument('--seed', type=int, default=None, help='random seed') - parser.add_argument( - '--deterministic', - action='store_true', - help='whether to set deterministic options for CUDNN backend.') - parser.add_argument( - '--options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file (deprecate), ' - 'change to --cfg-options instead.') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - parser.add_argument( - '--launcher', - choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', - help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - args = parser.parse_args() - if 'LOCAL_RANK' not in os.environ: - os.environ['LOCAL_RANK'] = str(args.local_rank) - - if args.options and args.cfg_options: - raise ValueError( - '--options and --cfg-options cannot be both ' - 'specified, --options is deprecated in favor of --cfg-options') - if args.options: - warnings.warn('--options is deprecated in favor of --cfg-options') - args.cfg_options = args.options - - return args - - -def main(): - args = parse_args() - - cfg = Config.fromfile(args.config) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - # import modules from string list. - if cfg.get('custom_imports', None): - from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) - # set cudnn_benchmark - if cfg.get('cudnn_benchmark', False): - torch.backends.cudnn.benchmark = True - - # work_dir is determined in this priority: CLI > segment in file > filename - if args.work_dir is not None: - # update configs according to CLI args if args.work_dir is not None - cfg.work_dir = args.work_dir - elif cfg.get('work_dir', None) is None: - # use config filename as default work_dir if cfg.work_dir is None - cfg.work_dir = osp.join('./work_dirs', - osp.splitext(osp.basename(args.config))[0]) - - if args.resume_from is not None: - cfg.resume_from = args.resume_from - if args.gpu_ids is not None: - cfg.gpu_ids = args.gpu_ids - else: - cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus) - - # init distributed env first, since logger depends on the dist info. 
- if args.launcher == 'none': - distributed = False - else: - distributed = True - init_dist(args.launcher, **cfg.dist_params) - # re-set gpu_ids with distributed training mode - _, world_size = get_dist_info() - cfg.gpu_ids = range(world_size) - - - # create work_dir - mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir)) - # dump config - cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config))) - # init the logger before other steps - timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) - log_file = osp.join(cfg.work_dir, f'{timestamp}.log') - logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) - - # init the meta dict to record some important information such as - # environment info and seed, which will be logged - meta = dict() - # log env info - env_info_dict = collect_env() - env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()]) - dash_line = '-' * 60 + '\n' - logger.info('Environment info:\n' + dash_line + env_info + '\n' + - dash_line) - meta['env_info'] = env_info - meta['config'] = cfg.pretty_text - # log some basic info - logger.info(f'Distributed training: {distributed}') - logger.info(f'Config:\n{cfg.pretty_text}') - - # set random seeds - if args.seed is not None: - logger.info(f'Set random seed to {args.seed}, ' - f'deterministic: {args.deterministic}') - set_random_seed(args.seed, deterministic=args.deterministic) - cfg.seed = args.seed - meta['seed'] = args.seed - meta['exp_name'] = osp.basename(args.config) - - model = build_detector( - cfg.model, - train_cfg=cfg.get('train_cfg'), - test_cfg=cfg.get('test_cfg')) - - datasets = [build_dataset(cfg.data.train)] - if len(cfg.workflow) == 2: - val_dataset = copy.deepcopy(cfg.data.val) - val_dataset.pipeline = cfg.data.train.pipeline - datasets.append(build_dataset(val_dataset)) - if cfg.checkpoint_config is not None: - # save mmdet version, config file content and class names in - # checkpoints as meta data - cfg.checkpoint_config.meta = dict( - mmdet_version=__version__ + get_git_hash()[:7], - CLASSES=datasets[0].CLASSES) - - # add an attribute for visualization convenience - model.CLASSES = datasets[0].CLASSES - train_detector( - model, - datasets, - cfg, - distributed=distributed, - validate=(not args.no_validate), - timestamp=timestamp, - meta=meta) - - -if __name__ == '__main__': - main() diff --git a/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h b/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h deleted file mode 100644 index b65888b1be11881a776827b5212f08b8f63138f9..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. 
-#pragma once -#include - -namespace detectron2 { - -at::Tensor box_iou_rotated_cpu( - const at::Tensor& boxes1, - const at::Tensor& boxes2); - -#if defined(WITH_CUDA) || defined(WITH_HIP) -at::Tensor box_iou_rotated_cuda( - const at::Tensor& boxes1, - const at::Tensor& boxes2); -#endif - -// Interface for Python -// inline is needed to prevent multiple function definitions when this header is -// included by different cpps -inline at::Tensor box_iou_rotated( - const at::Tensor& boxes1, - const at::Tensor& boxes2) { - assert(boxes1.device().is_cuda() == boxes2.device().is_cuda()); - if (boxes1.device().is_cuda()) { -#if defined(WITH_CUDA) || defined(WITH_HIP) - return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous()); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - - return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous()); -} - -} // namespace detectron2 diff --git a/spaces/CanKorkut/turkish-hatespeech-detection/README.md b/spaces/CanKorkut/turkish-hatespeech-detection/README.md deleted file mode 100644 index b82635df8508b10c8cb19856755ed2ad0ab643bb..0000000000000000000000000000000000000000 --- a/spaces/CanKorkut/turkish-hatespeech-detection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Turkish Hatespeech Detection -emoji: ⚡ -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Cicooo/vits-uma-genshin-honkai/app.py b/spaces/Cicooo/vits-uma-genshin-honkai/app.py deleted file mode 100644 index 92ddafdcd240434f58569b0e6964ef331a971dcf..0000000000000000000000000000000000000000 --- a/spaces/Cicooo/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,124 +0,0 @@ -import time -import gradio as gr -import utils -import commons -from models import SynthesizerTrn -from text import text_to_sequence -from torch import no_grad, LongTensor -import torch - -hps_ms = utils.get_hparams_from_file(r'./model/config.json') -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model).to(device) -_ = net_g_ms.eval() -speakers = hps_ms.speakers -model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 500: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = LongTensor([stn_tst.size(0)]) - speaker_id = LongTensor([speaker_id]) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - 
return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - with gr.Blocks() as app: - gr.Markdown( - "#
VITS Online Speech Synthesis Demo\n" - "
Voices mainly include Uma Musume, Genshin Impact (Chinese), Genshin Impact (Japanese), and Honkai Impact 3
" - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation)", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3], api_name="generate") - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - app.queue(concurrency_count=1).launch() \ No newline at end of file diff --git a/spaces/CikeyQI/Yunzai/Yunzai/lib/config/init.js b/spaces/CikeyQI/Yunzai/Yunzai/lib/config/init.js deleted file mode 100644 index 2cd1cc27e8788f9748137c9ddc5cc49343a4728a..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/Yunzai/Yunzai/lib/config/init.js +++ /dev/null @@ -1,42 +0,0 @@ -import setLog from './log.js' -import redisInit from './redis.js' -import { checkRun } from './check.js' -import cfg from './config.js' - -/** 设置标题 */ -process.title = 'TRSS Yunzai' - -/** 设置时区 */ -process.env.TZ = 'Asia/Shanghai' - -/** 捕获未处理的Promise错误 */ -process.on('unhandledRejection', (error, promise) => { - if (logger) { - logger.error(error) - } else { - console.log(error) - } -}) - -/** 退出事件 */ -process.on('exit', async code => { - if (typeof redis != 'undefined' && typeof test == 'undefined') - await redis.save() - logger.mark(logger.magenta('TRSS-Yunzai 已停止运行')) -}) - -await checkInit() - -/** 初始化事件 */ -async function checkInit() { - /** 日志设置 */ - setLog() - - logger.mark('----^_^----') - logger.mark(logger.yellow(`TRSS-Yunzai v${cfg.package.version} 启动中...`)) - logger.mark(logger.cyan('https://github.com/TimeRainStarSky/Yunzai')) - - await redisInit() - - checkRun() -} diff --git a/spaces/Cloudy1225/stackoverflow-sentiment-analysis/app.py b/spaces/Cloudy1225/stackoverflow-sentiment-analysis/app.py deleted file mode 100644 index 86c96775d7d564a4f6b6dcc5f6d615c467480a11..0000000000000000000000000000000000000000 --- a/spaces/Cloudy1225/stackoverflow-sentiment-analysis/app.py +++ /dev/null @@ -1,148 +0,0 @@ -import csv -import gradio as gr -import pandas as pd -from sentiment_analyser import RandomAnalyser, RoBERTaAnalyser, ChatGPTAnalyser -import matplotlib.pyplot as plt -from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix - - -def plot_bar(value_counts): - fig, ax = plt.subplots(figsize=(6, 6)) - value_counts.plot.barh(ax=ax) - ax.bar_label(ax.containers[0]) - plt.title('Frequency 
of Predictions') - return fig - - -def plot_confusion_matrix(y_pred, y_true): - cm = confusion_matrix(y_true, y_pred, normalize='true') - fig, ax = plt.subplots(figsize=(6, 6)) - labels = [] - for label in SENTI_MAPPING.keys(): - if (label in y_pred.values) or (label in y_true.values): - labels.append(label) - disp = ConfusionMatrixDisplay(confusion_matrix=cm, - display_labels=labels) - disp.plot(cmap="Blues", values_format=".2f", ax=ax, colorbar=False) - plt.title("Normalized Confusion Matrix") - return fig - - -def classify(num: int): - samples_df = df.sample(num) - X = samples_df['Text'].tolist() - y = samples_df['Label'] - roberta = MODEL_MAPPING[OUR_MODEL] - y_pred = pd.Series(roberta.predict(X), index=samples_df.index) - samples_df['Predict'] = y_pred - bar = plot_bar(y_pred.value_counts()) - cm = plot_confusion_matrix(y_pred, y) - plt.close() - return samples_df, bar, cm - - -def analysis(Text): - keys = [] - values = [] - for name, model in MODEL_MAPPING.items(): - keys.append(name) - values.append(SENTI_MAPPING[model.predict([Text])[0]]) - return pd.DataFrame([values], columns=keys) - - -def analyse_file(file): - output_name = 'output.csv' - with open(output_name, mode='w', newline='') as output: - writer = csv.writer(output) - header = ['Text', 'Label'] - writer.writerow(header) - model = MODEL_MAPPING[OUR_MODEL] - with open(file.name) as f: - for line in f: - text = line[:-1] - sentiment = model.predict([text]) - writer.writerow([text, sentiment[0]]) - return output_name - - -MODEL_MAPPING = { - 'Random': RandomAnalyser(), - 'RoBERTa': RoBERTaAnalyser(), - 'ChatGPT': RandomAnalyser(), -} - -OUR_MODEL = 'RoBERTa' - -SENTI_MAPPING = { - 'negative': '😭', - 'neutral': '😶', - 'positive': '🥰' -} - -TITLE = "Sentiment Analysis on Software Engineer Texts" -DESCRIPTION = ( - "这里是第16组“睿王和他的五个小跟班”软工三迭代三模型演示页面。" - "模型链接:[Cloudy1225/stackoverflow-roberta-base-sentiment]" - "(https://huggingface.co/Cloudy1225/stackoverflow-roberta-base-sentiment) " -) - -MAX_SAMPLES = 64 - -df = pd.read_csv('./SOF4423.csv') - -with gr.Blocks(title=TITLE) as demo: - gr.HTML(f"

{TITLE}

") - gr.Markdown(DESCRIPTION) - gr.HTML("

Model Inference

") - gr.Markdown(( - "在左侧文本框中输入文本并按回车键,右侧将输出情感分析结果。" - "这里我们展示了三种结果,分别是随机结果、模型结果和 ChatGPT 结果。" - )) - with gr.Row(): - with gr.Column(): - text_input = gr.Textbox(label='Input', - placeholder="Enter a positive or negative sentence here...") - with gr.Column(): - senti_output = gr.Dataframe(type="pandas", value=[['😋', '😋', '😋']], - headers=list(MODEL_MAPPING.keys()), interactive=False) - text_input.submit(analysis, inputs=text_input, outputs=senti_output, show_progress=True) - - gr.Markdown(( - "在左侧文件框中上传 txt/csv 文件,模型会对输入文本的每一行当作一个文本进行情感分析。" - "可以在右侧下载输出文件,输出文件为两列 csv 格式,第一列为原始文本,第二列为分类结果。" - )) - with gr.Row(): - with gr.Column(): - file_input = gr.File(label='File', - file_types=['.txt', '.csv']) - with gr.Column(): - file_output = gr.File(label='Output') - file_input.upload(analyse_file, inputs=file_input, outputs=file_output) - - gr.HTML("

Model Evaluation

") - gr.Markdown(( - "这里是在 StackOverflow4423 数据集上评估我们的模型。" - "滑动 Slider,将会从 StackOverflow4423 数据集中抽样出指定数量的样本,预测其情感标签。" - "并根据预测结果绘制标签分布图和混淆矩阵。" - )) - input_models = list(MODEL_MAPPING) - input_n_samples = gr.Slider( - minimum=4, - maximum=MAX_SAMPLES, - value=8, - step=4, - label='Number of samples' - ) - - with gr.Row(): - with gr.Column(): - bar_plot = gr.Plot(label='Predictions Frequency') - with gr.Column(): - cm_plot = gr.Plot(label='Confusion Matrix') - - with gr.Row(): - dataframe = gr.Dataframe(type="pandas", wrap=True, headers=['Text', 'Label', 'Predict']) - - input_n_samples.change(fn=classify, inputs=input_n_samples, outputs=[dataframe, bar_plot, cm_plot]) - -demo.launch() diff --git a/spaces/CofAI/viewq/index.html b/spaces/CofAI/viewq/index.html deleted file mode 100644 index 2521505a17f7e85156d7af8eaf9e44af2786b2e9..0000000000000000000000000000000000000000 --- a/spaces/CofAI/viewq/index.html +++ /dev/null @@ -1,21 +0,0 @@ - - - - ViewQ - - - -

ViewQ

- - - - -

-

ChatGPT by ViewQ

-

- -
- - - diff --git a/spaces/Cvandi/remake/experiments/pretrained_models/README.md b/spaces/Cvandi/remake/experiments/pretrained_models/README.md deleted file mode 100644 index d0cc4afcbdd2c733f6b946bb86bd00baa90e8295..0000000000000000000000000000000000000000 --- a/spaces/Cvandi/remake/experiments/pretrained_models/README.md +++ /dev/null @@ -1 +0,0 @@ -# Put downloaded pre-trained models here diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_app.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_app.py deleted file mode 100644 index 8fd4471d3af019c6e3bd01fcb9838ee99636238e..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_app.py +++ /dev/null @@ -1,557 +0,0 @@ -import asyncio -import logging -import warnings -from functools import partial, update_wrapper -from typing import ( - TYPE_CHECKING, - Any, - AsyncIterator, - Awaitable, - Callable, - Dict, - Iterable, - Iterator, - List, - Mapping, - MutableMapping, - Optional, - Sequence, - Tuple, - Type, - Union, - cast, -) - -from aiosignal import Signal -from frozenlist import FrozenList - -from . import hdrs -from .abc import ( - AbstractAccessLogger, - AbstractMatchInfo, - AbstractRouter, - AbstractStreamWriter, -) -from .helpers import DEBUG -from .http_parser import RawRequestMessage -from .log import web_logger -from .streams import StreamReader -from .web_log import AccessLogger -from .web_middlewares import _fix_request_current_app -from .web_protocol import RequestHandler -from .web_request import Request -from .web_response import StreamResponse -from .web_routedef import AbstractRouteDef -from .web_server import Server -from .web_urldispatcher import ( - AbstractResource, - AbstractRoute, - Domain, - MaskDomain, - MatchedSubAppResource, - PrefixedSubAppResource, - UrlDispatcher, -) - -__all__ = ("Application", "CleanupError") - - -if TYPE_CHECKING: # pragma: no cover - from .typedefs import Handler - - _AppSignal = Signal[Callable[["Application"], Awaitable[None]]] - _RespPrepareSignal = Signal[Callable[[Request, StreamResponse], Awaitable[None]]] - _Middleware = Union[ - Callable[[Request, Handler], Awaitable[StreamResponse]], - Callable[["Application", Handler], Awaitable[Handler]], # old-style - ] - _Middlewares = FrozenList[_Middleware] - _MiddlewaresHandlers = Optional[Sequence[Tuple[_Middleware, bool]]] - _Subapps = List["Application"] -else: - # No type checker mode, skip types - _AppSignal = Signal - _RespPrepareSignal = Signal - _Middleware = Callable - _Middlewares = FrozenList - _MiddlewaresHandlers = Optional[Sequence] - _Subapps = List - - -class Application(MutableMapping[str, Any]): - ATTRS = frozenset( - [ - "logger", - "_debug", - "_router", - "_loop", - "_handler_args", - "_middlewares", - "_middlewares_handlers", - "_run_middlewares", - "_state", - "_frozen", - "_pre_frozen", - "_subapps", - "_on_response_prepare", - "_on_startup", - "_on_shutdown", - "_on_cleanup", - "_client_max_size", - "_cleanup_ctx", - ] - ) - - def __init__( - self, - *, - logger: logging.Logger = web_logger, - router: Optional[UrlDispatcher] = None, - middlewares: Iterable[_Middleware] = (), - handler_args: Optional[Mapping[str, Any]] = None, - client_max_size: int = 1024**2, - loop: Optional[asyncio.AbstractEventLoop] = None, - debug: Any = ..., # mypy doesn't support ellipsis - ) -> None: - if router is None: - router = UrlDispatcher() - else: - warnings.warn( - "router argument is deprecated", DeprecationWarning, 
stacklevel=2 - ) - assert isinstance(router, AbstractRouter), router - - if loop is not None: - warnings.warn( - "loop argument is deprecated", DeprecationWarning, stacklevel=2 - ) - - if debug is not ...: - warnings.warn( - "debug argument is deprecated", DeprecationWarning, stacklevel=2 - ) - self._debug = debug - self._router: UrlDispatcher = router - self._loop = loop - self._handler_args = handler_args - self.logger = logger - - self._middlewares: _Middlewares = FrozenList(middlewares) - - # initialized on freezing - self._middlewares_handlers: _MiddlewaresHandlers = None - # initialized on freezing - self._run_middlewares: Optional[bool] = None - - self._state: Dict[str, Any] = {} - self._frozen = False - self._pre_frozen = False - self._subapps: _Subapps = [] - - self._on_response_prepare: _RespPrepareSignal = Signal(self) - self._on_startup: _AppSignal = Signal(self) - self._on_shutdown: _AppSignal = Signal(self) - self._on_cleanup: _AppSignal = Signal(self) - self._cleanup_ctx = CleanupContext() - self._on_startup.append(self._cleanup_ctx._on_startup) - self._on_cleanup.append(self._cleanup_ctx._on_cleanup) - self._client_max_size = client_max_size - - def __init_subclass__(cls: Type["Application"]) -> None: - warnings.warn( - "Inheritance class {} from web.Application " - "is discouraged".format(cls.__name__), - DeprecationWarning, - stacklevel=2, - ) - - if DEBUG: # pragma: no cover - - def __setattr__(self, name: str, val: Any) -> None: - if name not in self.ATTRS: - warnings.warn( - "Setting custom web.Application.{} attribute " - "is discouraged".format(name), - DeprecationWarning, - stacklevel=2, - ) - super().__setattr__(name, val) - - # MutableMapping API - - def __eq__(self, other: object) -> bool: - return self is other - - def __getitem__(self, key: str) -> Any: - return self._state[key] - - def _check_frozen(self) -> None: - if self._frozen: - warnings.warn( - "Changing state of started or joined " "application is deprecated", - DeprecationWarning, - stacklevel=3, - ) - - def __setitem__(self, key: str, value: Any) -> None: - self._check_frozen() - self._state[key] = value - - def __delitem__(self, key: str) -> None: - self._check_frozen() - del self._state[key] - - def __len__(self) -> int: - return len(self._state) - - def __iter__(self) -> Iterator[str]: - return iter(self._state) - - ######## - @property - def loop(self) -> asyncio.AbstractEventLoop: - # Technically the loop can be None - # but we mask it by explicit type cast - # to provide more convinient type annotation - warnings.warn("loop property is deprecated", DeprecationWarning, stacklevel=2) - return cast(asyncio.AbstractEventLoop, self._loop) - - def _set_loop(self, loop: Optional[asyncio.AbstractEventLoop]) -> None: - if loop is None: - loop = asyncio.get_event_loop() - if self._loop is not None and self._loop is not loop: - raise RuntimeError( - "web.Application instance initialized with different loop" - ) - - self._loop = loop - - # set loop debug - if self._debug is ...: - self._debug = loop.get_debug() - - # set loop to sub applications - for subapp in self._subapps: - subapp._set_loop(loop) - - @property - def pre_frozen(self) -> bool: - return self._pre_frozen - - def pre_freeze(self) -> None: - if self._pre_frozen: - return - - self._pre_frozen = True - self._middlewares.freeze() - self._router.freeze() - self._on_response_prepare.freeze() - self._cleanup_ctx.freeze() - self._on_startup.freeze() - self._on_shutdown.freeze() - self._on_cleanup.freeze() - self._middlewares_handlers = 
tuple(self._prepare_middleware()) - - # If current app and any subapp do not have middlewares avoid run all - # of the code footprint that it implies, which have a middleware - # hardcoded per app that sets up the current_app attribute. If no - # middlewares are configured the handler will receive the proper - # current_app without needing all of this code. - self._run_middlewares = True if self.middlewares else False - - for subapp in self._subapps: - subapp.pre_freeze() - self._run_middlewares = self._run_middlewares or subapp._run_middlewares - - @property - def frozen(self) -> bool: - return self._frozen - - def freeze(self) -> None: - if self._frozen: - return - - self.pre_freeze() - self._frozen = True - for subapp in self._subapps: - subapp.freeze() - - @property - def debug(self) -> bool: - warnings.warn("debug property is deprecated", DeprecationWarning, stacklevel=2) - return self._debug # type: ignore[no-any-return] - - def _reg_subapp_signals(self, subapp: "Application") -> None: - def reg_handler(signame: str) -> None: - subsig = getattr(subapp, signame) - - async def handler(app: "Application") -> None: - await subsig.send(subapp) - - appsig = getattr(self, signame) - appsig.append(handler) - - reg_handler("on_startup") - reg_handler("on_shutdown") - reg_handler("on_cleanup") - - def add_subapp(self, prefix: str, subapp: "Application") -> AbstractResource: - if not isinstance(prefix, str): - raise TypeError("Prefix must be str") - prefix = prefix.rstrip("/") - if not prefix: - raise ValueError("Prefix cannot be empty") - factory = partial(PrefixedSubAppResource, prefix, subapp) - return self._add_subapp(factory, subapp) - - def _add_subapp( - self, resource_factory: Callable[[], AbstractResource], subapp: "Application" - ) -> AbstractResource: - if self.frozen: - raise RuntimeError("Cannot add sub application to frozen application") - if subapp.frozen: - raise RuntimeError("Cannot add frozen application") - resource = resource_factory() - self.router.register_resource(resource) - self._reg_subapp_signals(subapp) - self._subapps.append(subapp) - subapp.pre_freeze() - if self._loop is not None: - subapp._set_loop(self._loop) - return resource - - def add_domain(self, domain: str, subapp: "Application") -> AbstractResource: - if not isinstance(domain, str): - raise TypeError("Domain must be str") - elif "*" in domain: - rule: Domain = MaskDomain(domain) - else: - rule = Domain(domain) - factory = partial(MatchedSubAppResource, rule, subapp) - return self._add_subapp(factory, subapp) - - def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]: - return self.router.add_routes(routes) - - @property - def on_response_prepare(self) -> _RespPrepareSignal: - return self._on_response_prepare - - @property - def on_startup(self) -> _AppSignal: - return self._on_startup - - @property - def on_shutdown(self) -> _AppSignal: - return self._on_shutdown - - @property - def on_cleanup(self) -> _AppSignal: - return self._on_cleanup - - @property - def cleanup_ctx(self) -> "CleanupContext": - return self._cleanup_ctx - - @property - def router(self) -> UrlDispatcher: - return self._router - - @property - def middlewares(self) -> _Middlewares: - return self._middlewares - - def _make_handler( - self, - *, - loop: Optional[asyncio.AbstractEventLoop] = None, - access_log_class: Type[AbstractAccessLogger] = AccessLogger, - **kwargs: Any, - ) -> Server: - - if not issubclass(access_log_class, AbstractAccessLogger): - raise TypeError( - "access_log_class must be subclass of " - 
"aiohttp.abc.AbstractAccessLogger, got {}".format(access_log_class) - ) - - self._set_loop(loop) - self.freeze() - - kwargs["debug"] = self._debug - kwargs["access_log_class"] = access_log_class - if self._handler_args: - for k, v in self._handler_args.items(): - kwargs[k] = v - - return Server( - self._handle, # type: ignore[arg-type] - request_factory=self._make_request, - loop=self._loop, - **kwargs, - ) - - def make_handler( - self, - *, - loop: Optional[asyncio.AbstractEventLoop] = None, - access_log_class: Type[AbstractAccessLogger] = AccessLogger, - **kwargs: Any, - ) -> Server: - - warnings.warn( - "Application.make_handler(...) is deprecated, " "use AppRunner API instead", - DeprecationWarning, - stacklevel=2, - ) - - return self._make_handler( - loop=loop, access_log_class=access_log_class, **kwargs - ) - - async def startup(self) -> None: - """Causes on_startup signal - - Should be called in the event loop along with the request handler. - """ - await self.on_startup.send(self) - - async def shutdown(self) -> None: - """Causes on_shutdown signal - - Should be called before cleanup() - """ - await self.on_shutdown.send(self) - - async def cleanup(self) -> None: - """Causes on_cleanup signal - - Should be called after shutdown() - """ - if self.on_cleanup.frozen: - await self.on_cleanup.send(self) - else: - # If an exception occurs in startup, ensure cleanup contexts are completed. - await self._cleanup_ctx._on_cleanup(self) - - def _make_request( - self, - message: RawRequestMessage, - payload: StreamReader, - protocol: RequestHandler, - writer: AbstractStreamWriter, - task: "asyncio.Task[None]", - _cls: Type[Request] = Request, - ) -> Request: - return _cls( - message, - payload, - protocol, - writer, - task, - self._loop, - client_max_size=self._client_max_size, - ) - - def _prepare_middleware(self) -> Iterator[Tuple[_Middleware, bool]]: - for m in reversed(self._middlewares): - if getattr(m, "__middleware_version__", None) == 1: - yield m, True - else: - warnings.warn( - 'old-style middleware "{!r}" deprecated, ' "see #2252".format(m), - DeprecationWarning, - stacklevel=2, - ) - yield m, False - - yield _fix_request_current_app(self), True - - async def _handle(self, request: Request) -> StreamResponse: - loop = asyncio.get_event_loop() - debug = loop.get_debug() - match_info = await self._router.resolve(request) - if debug: # pragma: no cover - if not isinstance(match_info, AbstractMatchInfo): - raise TypeError( - "match_info should be AbstractMatchInfo " - "instance, not {!r}".format(match_info) - ) - match_info.add_app(self) - - match_info.freeze() - - resp = None - request._match_info = match_info - expect = request.headers.get(hdrs.EXPECT) - if expect: - resp = await match_info.expect_handler(request) - await request.writer.drain() - - if resp is None: - handler = match_info.handler - - if self._run_middlewares: - for app in match_info.apps[::-1]: - for m, new_style in app._middlewares_handlers: # type: ignore[union-attr] # noqa - if new_style: - handler = update_wrapper( - partial(m, handler=handler), handler - ) - else: - handler = await m(app, handler) # type: ignore[arg-type] - - resp = await handler(request) - - return resp - - def __call__(self) -> "Application": - """gunicorn compatibility""" - return self - - def __repr__(self) -> str: - return f"" - - def __bool__(self) -> bool: - return True - - -class CleanupError(RuntimeError): - @property - def exceptions(self) -> List[BaseException]: - return cast(List[BaseException], self.args[1]) - - -if TYPE_CHECKING: # 
pragma: no cover - _CleanupContextBase = FrozenList[Callable[[Application], AsyncIterator[None]]] -else: - _CleanupContextBase = FrozenList - - -class CleanupContext(_CleanupContextBase): - def __init__(self) -> None: - super().__init__() - self._exits: List[AsyncIterator[None]] = [] - - async def _on_startup(self, app: Application) -> None: - for cb in self: - it = cb(app).__aiter__() - await it.__anext__() - self._exits.append(it) - - async def _on_cleanup(self, app: Application) -> None: - errors = [] - for it in reversed(self._exits): - try: - await it.__anext__() - except StopAsyncIteration: - pass - except Exception as exc: - errors.append(exc) - else: - errors.append(RuntimeError(f"{it!r} has more than one 'yield'")) - if errors: - if len(errors) == 1: - raise errors[0] - else: - raise CleanupError("Multiple errors on cleanup stage", errors) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/certifi/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/certifi/__init__.py deleted file mode 100644 index 705f416d6b06ce5f51b3ff47c49d078e93c6f034..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/certifi/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .core import contents, where - -__all__ = ["contents", "where"] -__version__ = "2023.05.07" diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/M_A_T_H_.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/M_A_T_H_.py deleted file mode 100644 index 011426b52a195bb2596116cc7bce0ad6e671eb23..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/M_A_T_H_.py +++ /dev/null @@ -1,5 +0,0 @@ -from .otBase import BaseTTXConverter - - -class table_M_A_T_H_(BaseTTXConverter): - pass diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_k_e_r_n.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_k_e_r_n.py deleted file mode 100644 index 94183c8a0a1e8a02cfc229d525030d9ae2b27ddf..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_k_e_r_n.py +++ /dev/null @@ -1,279 +0,0 @@ -from fontTools.ttLib import getSearchRange -from fontTools.misc.textTools import safeEval, readHex -from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi -from . import DefaultTable -import struct -import sys -import array -import logging - - -log = logging.getLogger(__name__) - - -class table__k_e_r_n(DefaultTable.DefaultTable): - def getkern(self, format): - for subtable in self.kernTables: - if subtable.format == format: - return subtable - return None # not found - - def decompile(self, data, ttFont): - version, nTables = struct.unpack(">HH", data[:4]) - apple = False - if (len(data) >= 8) and (version == 1): - # AAT Apple's "new" format. Hm. - version, nTables = struct.unpack(">LL", data[:8]) - self.version = fi2fl(version, 16) - data = data[8:] - apple = True - else: - self.version = version - data = data[4:] - self.kernTables = [] - for i in range(nTables): - if self.version == 1.0: - # Apple - length, coverage, subtableFormat = struct.unpack(">LBB", data[:6]) - else: - # in OpenType spec the "version" field refers to the common - # subtable header; the actual subtable format is stored in - # the 8-15 mask bits of "coverage" field. 
- # This "version" is always 0 so we ignore it here - _, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6]) - if nTables == 1 and subtableFormat == 0: - # The "length" value is ignored since some fonts - # (like OpenSans and Calibri) have a subtable larger than - # its value. - (nPairs,) = struct.unpack(">H", data[6:8]) - calculated_length = (nPairs * 6) + 14 - if length != calculated_length: - log.warning( - "'kern' subtable longer than defined: " - "%d bytes instead of %d bytes" % (calculated_length, length) - ) - length = calculated_length - if subtableFormat not in kern_classes: - subtable = KernTable_format_unkown(subtableFormat) - else: - subtable = kern_classes[subtableFormat](apple) - subtable.decompile(data[:length], ttFont) - self.kernTables.append(subtable) - data = data[length:] - - def compile(self, ttFont): - if hasattr(self, "kernTables"): - nTables = len(self.kernTables) - else: - nTables = 0 - if self.version == 1.0: - # AAT Apple's "new" format. - data = struct.pack(">LL", fl2fi(self.version, 16), nTables) - else: - data = struct.pack(">HH", self.version, nTables) - if hasattr(self, "kernTables"): - for subtable in self.kernTables: - data = data + subtable.compile(ttFont) - return data - - def toXML(self, writer, ttFont): - writer.simpletag("version", value=self.version) - writer.newline() - for subtable in self.kernTables: - subtable.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == "version": - self.version = safeEval(attrs["value"]) - return - if name != "kernsubtable": - return - if not hasattr(self, "kernTables"): - self.kernTables = [] - format = safeEval(attrs["format"]) - if format not in kern_classes: - subtable = KernTable_format_unkown(format) - else: - apple = self.version == 1.0 - subtable = kern_classes[format](apple) - self.kernTables.append(subtable) - subtable.fromXML(name, attrs, content, ttFont) - - -class KernTable_format_0(object): - - # 'version' is kept for backward compatibility - version = format = 0 - - def __init__(self, apple=False): - self.apple = apple - - def decompile(self, data, ttFont): - if not self.apple: - version, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6]) - if version != 0: - from fontTools.ttLib import TTLibError - - raise TTLibError("unsupported kern subtable version: %d" % version) - tupleIndex = None - # Should we also assert length == len(data)? - data = data[6:] - else: - length, coverage, subtableFormat, tupleIndex = struct.unpack( - ">LBBH", data[:8] - ) - data = data[8:] - assert self.format == subtableFormat, "unsupported format" - self.coverage = coverage - self.tupleIndex = tupleIndex - - self.kernTable = kernTable = {} - - nPairs, searchRange, entrySelector, rangeShift = struct.unpack( - ">HHHH", data[:8] - ) - data = data[8:] - - datas = array.array("H", data[: 6 * nPairs]) - if sys.byteorder != "big": - datas.byteswap() - it = iter(datas) - glyphOrder = ttFont.getGlyphOrder() - for k in range(nPairs): - left, right, value = next(it), next(it), next(it) - if value >= 32768: - value -= 65536 - try: - kernTable[(glyphOrder[left], glyphOrder[right])] = value - except IndexError: - # Slower, but will not throw an IndexError on an invalid - # glyph id. 
- kernTable[ - (ttFont.getGlyphName(left), ttFont.getGlyphName(right)) - ] = value - if len(data) > 6 * nPairs + 4: # Ignore up to 4 bytes excess - log.warning( - "excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs - ) - - def compile(self, ttFont): - nPairs = min(len(self.kernTable), 0xFFFF) - searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6) - searchRange &= 0xFFFF - entrySelector = min(entrySelector, 0xFFFF) - rangeShift = min(rangeShift, 0xFFFF) - data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift) - - # yeehee! (I mean, turn names into indices) - try: - reverseOrder = ttFont.getReverseGlyphMap() - kernTable = sorted( - (reverseOrder[left], reverseOrder[right], value) - for ((left, right), value) in self.kernTable.items() - ) - except KeyError: - # Slower, but will not throw KeyError on invalid glyph id. - getGlyphID = ttFont.getGlyphID - kernTable = sorted( - (getGlyphID(left), getGlyphID(right), value) - for ((left, right), value) in self.kernTable.items() - ) - - for left, right, value in kernTable: - data = data + struct.pack(">HHh", left, right, value) - - if not self.apple: - version = 0 - length = len(data) + 6 - if length >= 0x10000: - log.warning( - '"kern" subtable overflow, ' - "truncating length value while preserving pairs." - ) - length &= 0xFFFF - header = struct.pack(">HHBB", version, length, self.format, self.coverage) - else: - if self.tupleIndex is None: - # sensible default when compiling a TTX from an old fonttools - # or when inserting a Windows-style format 0 subtable into an - # Apple version=1.0 kern table - log.warning("'tupleIndex' is None; default to 0") - self.tupleIndex = 0 - length = len(data) + 8 - header = struct.pack( - ">LBBH", length, self.coverage, self.format, self.tupleIndex - ) - return header + data - - def toXML(self, writer, ttFont): - attrs = dict(coverage=self.coverage, format=self.format) - if self.apple: - if self.tupleIndex is None: - log.warning("'tupleIndex' is None; default to 0") - attrs["tupleIndex"] = 0 - else: - attrs["tupleIndex"] = self.tupleIndex - writer.begintag("kernsubtable", **attrs) - writer.newline() - items = sorted(self.kernTable.items()) - for (left, right), value in items: - writer.simpletag("pair", [("l", left), ("r", right), ("v", value)]) - writer.newline() - writer.endtag("kernsubtable") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.coverage = safeEval(attrs["coverage"]) - subtableFormat = safeEval(attrs["format"]) - if self.apple: - if "tupleIndex" in attrs: - self.tupleIndex = safeEval(attrs["tupleIndex"]) - else: - # previous fontTools versions didn't export tupleIndex - log.warning("Apple kern subtable is missing 'tupleIndex' attribute") - self.tupleIndex = None - else: - self.tupleIndex = None - assert subtableFormat == self.format, "unsupported format" - if not hasattr(self, "kernTable"): - self.kernTable = {} - for element in content: - if not isinstance(element, tuple): - continue - name, attrs, content = element - self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"]) - - def __getitem__(self, pair): - return self.kernTable[pair] - - def __setitem__(self, pair, value): - self.kernTable[pair] = value - - def __delitem__(self, pair): - del self.kernTable[pair] - - -class KernTable_format_unkown(object): - def __init__(self, format): - self.format = format - - def decompile(self, data, ttFont): - self.data = data - - def compile(self, ttFont): - return self.data - - def toXML(self, writer, ttFont): - 
writer.begintag("kernsubtable", format=self.format) - writer.newline() - writer.comment("unknown 'kern' subtable format") - writer.newline() - writer.dumphex(self.data) - writer.endtag("kernsubtable") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - self.decompile(readHex(content), ttFont) - - -kern_classes = {0: KernTable_format_0} diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/inference/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/inference/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/boundary.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/boundary.py deleted file mode 100644 index 034691cb8769aff85927ba1ea222b4a690f95e82..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/utils/boundary.py +++ /dev/null @@ -1,473 +0,0 @@ -""" -@date: 2021/06/19 -@description: -""" -import math -import functools - -from scipy import stats -from scipy.ndimage.filters import maximum_filter -import numpy as np -from typing import List -from utils.conversion import uv2xyz, xyz2uv, depth2xyz, uv2pixel, depth2uv, pixel2uv, xyz2pixel, uv2lonlat -from utils.visibility_polygon import calc_visible_polygon - - -def connect_corners_uv(uv1: np.ndarray, uv2: np.ndarray, length=256) -> np.ndarray: - """ - :param uv1: [u, v] - :param uv2: [u, v] - :param length: Fix the total length in pixel coordinates - :return: - """ - # why -0.5? Check out the uv2Pixel function - p_u1 = uv1[0] * length - 0.5 - p_u2 = uv2[0] * length - 0.5 - - if abs(p_u1 - p_u2) < length / 2: - start = np.ceil(min(p_u1, p_u2)) - p = max(p_u1, p_u2) - end = np.floor(p) - if end == np.ceil(p): - end = end - 1 - else: - start = np.ceil(max(p_u1, p_u2)) - p = min(p_u1, p_u2) + length - end = np.floor(p) - if end == np.ceil(p): - end = end - 1 - p_us = (np.arange(start, end + 1) % length).astype(np.float64) - if len(p_us) == 0: - return None - us = (p_us + 0.5) / length # why +0.5? Check out the uv2Pixel function - - plan_y = boundary_type(np.array([uv1, uv2])) - xyz1 = uv2xyz(np.array(uv1), plan_y) - xyz2 = uv2xyz(np.array(uv2), plan_y) - x1 = xyz1[0] - z1 = xyz1[2] - x2 = xyz2[0] - z2 = xyz2[2] - - d_x = x2 - x1 - d_z = z2 - z1 - - lon_s = (us - 0.5) * 2 * np.pi - k = np.tan(lon_s) - ps = (k * z1 - x1) / (d_x - k * d_z) - cs = np.sqrt((z1 + ps * d_z) ** 2 + (x1 + ps * d_x) ** 2) - - lats = np.arctan2(plan_y, cs) - vs = lats / np.pi + 0.5 - uv = np.stack([us, vs], axis=-1) - - if start == end: - return uv[0:1] - return uv - - -def connect_corners_xyz(uv1: np.ndarray, uv2: np.ndarray, step=0.01) -> np.ndarray: - """ - :param uv1: [u, v] - :param uv2: [u, v] - :param step: Fixed step size in xyz coordinates - :return: - """ - plan_y = boundary_type(np.array([uv1, uv2])) - xyz1 = uv2xyz(np.array(uv1), plan_y) - xyz2 = uv2xyz(np.array(uv2), plan_y) - - vec = xyz2 - xyz1 - norm = np.linalg.norm(vec, ord=2) - direct = vec / norm - xyz = np.array([xyz1 + direct * dis for dis in np.linspace(0, norm, int(norm / step))]) - if len(xyz) == 0: - xyz = np.array([xyz2]) - uv = xyz2uv(xyz) - return uv - - -def connect_corners(uv1: np.ndarray, uv2: np.ndarray, step=0.01, length=None) -> np.ndarray: - """ - :param uv1: [u, v] - :param uv2: [u, v] - :param step: - :param length: - :return: [[u1, v1], [u2, v2]....] 
if length!=None,length of return result = length - """ - if length is not None: - uv = connect_corners_uv(uv1, uv2, length) - elif step is not None: - uv = connect_corners_xyz(uv1, uv2, step) - else: - uv = np.array([uv1]) - return uv - - -def visibility_corners(corners): - plan_y = boundary_type(corners) - xyz = uv2xyz(corners, plan_y) - xz = xyz[:, ::2] - xz = calc_visible_polygon(center=np.array([0, 0]), polygon=xz, show=False) - xyz = np.insert(xz, 1, plan_y, axis=1) - output = xyz2uv(xyz).astype(np.float32) - return output - - -def corners2boundary(corners: np.ndarray, step=0.01, length=None, visible=True) -> np.ndarray: - """ - When there is occlusion, even if the length is fixed, the final output length may be greater than the given length, - which is more defined as the fixed step size under UV - :param length: - :param step: - :param corners: [[u1, v1], [u2, v2]....] - :param visible: - :return: [[u1, v1], [u2, v2]....] if length!=None,length of return result = length - """ - assert step is not None or length is not None, "the step and length parameters cannot be null at the same time" - if len(corners) < 3: - return corners - - if visible: - corners = visibility_corners(corners) - - n_con = len(corners) - boundary = None - for j in range(n_con): - uv = connect_corners(corners[j], corners[(j + 1) % n_con], step, length) - if uv is None: - continue - if boundary is None: - boundary = uv - else: - boundary = np.concatenate((boundary, uv)) - boundary = np.roll(boundary, -boundary.argmin(axis=0)[0], axis=0) - - output_polygon = [] - for i, p in enumerate(boundary): - q = boundary[(i + 1) % len(boundary)] - if int(p[0] * 10000) == int(q[0] * 10000): - continue - output_polygon.append(p) - output_polygon = np.array(output_polygon, dtype=np.float32) - return output_polygon - - -def corners2boundaries(ratio: float, corners_xyz: np.ndarray = None, corners_uv: np.ndarray = None, step=0.01, - length=None, visible=True): - """ - When both step and length are None, corners are also returned - :param ratio: - :param corners_xyz: - :param corners_uv: - :param step: - :param length: - :param visible: - :return: floor_boundary, ceil_boundary - """ - if corners_xyz is None: - plan_y = boundary_type(corners_uv) - xyz = uv2xyz(corners_uv, plan_y) - floor_xyz = xyz.copy() - ceil_xyz = xyz.copy() - if plan_y > 0: - ceil_xyz[:, 1] *= -ratio - else: - floor_xyz[:, 1] /= -ratio - else: - floor_xyz = corners_xyz.copy() - ceil_xyz = corners_xyz.copy() - if corners_xyz[0][1] > 0: - ceil_xyz[:, 1] *= -ratio - else: - floor_xyz[:, 1] /= -ratio - - floor_uv = xyz2uv(floor_xyz) - ceil_uv = xyz2uv(ceil_xyz) - if step is None and length is None: - return floor_uv, ceil_uv - - floor_boundary = corners2boundary(floor_uv, step, length, visible) - ceil_boundary = corners2boundary(ceil_uv, step, length, visible) - return floor_boundary, ceil_boundary - - -def depth2boundary(depth: np.array, step=0.01, length=None,): - xyz = depth2xyz(depth) - uv = xyz2uv(xyz) - return corners2boundary(uv, step, length, visible=False) - - -def depth2boundaries(ratio: float, depth: np.array, step=0.01, length=None,): - """ - - :param ratio: - :param depth: - :param step: - :param length: - :return: floor_boundary, ceil_boundary - """ - xyz = depth2xyz(depth) - return corners2boundaries(ratio, corners_xyz=xyz, step=step, length=length, visible=False) - - -def boundary_type(corners: np.ndarray) -> int: - """ - Returns the boundary type that also represents the projection plane - :param corners: - :return: - """ - if 
is_ceil_boundary(corners): - plan_y = -1 - elif is_floor_boundary(corners): - plan_y = 1 - else: - # An intersection occurs and an exception is considered - assert False, 'corners error!' - return plan_y - - -def is_normal_layout(boundaries: List[np.array]): - if len(boundaries) != 2: - print("boundaries length must be 2!") - return False - - if boundary_type(boundaries[0]) != -1: - print("ceil boundary error!") - return False - - if boundary_type(boundaries[1]) != 1: - print("floor boundary error!") - return False - return True - - -def is_ceil_boundary(corners: np.ndarray) -> bool: - m = corners[..., 1].max() - return m < 0.5 - - -def is_floor_boundary(corners: np.ndarray) -> bool: - m = corners[..., 1].min() - return m > 0.5 - - -@functools.lru_cache() -def get_gauss_map(sigma=1.5, width=5): - x = np.arange(width*2 + 1) - width - y = stats.norm(0, sigma).pdf(x) - y = y / y.max() - return y - - -def get_heat_map(u_s, patch_num=256, sigma=2, window_width=15, show=False): - """ - :param window_width: - :param sigma: - :param u_s: [u1, u2, u3, ...] - :param patch_num - :param show - :return: - """ - pixel_us = uv2pixel(u_s, w=patch_num, axis=0) - gauss_map = get_gauss_map(sigma, window_width) - heat_map_all = [] - for u in pixel_us: - heat_map = np.zeros(patch_num, dtype=np.float) - left = u-window_width - right = u+window_width+1 - - offset = 0 - if left < 0: - offset = left - elif right > patch_num: - offset = right - patch_num - - left = left - offset - right = right - offset - heat_map[left:right] = gauss_map - if offset != 0: - heat_map = np.roll(heat_map, offset) - heat_map_all.append(heat_map) - - heat_map_all = np.array(heat_map_all).max(axis=0) - if show: - import matplotlib.pyplot as plt - plt.imshow(heat_map_all[None].repeat(50, axis=0)) - plt.show() - return heat_map_all - - -def find_peaks(signal, size=15*2+1, min_v=0.05, N=None): - # code from HorizonNet: https://github.com/sunset1995/HorizonNet/blob/master/inference.py - max_v = maximum_filter(signal, size=size, mode='wrap') - pk_loc = np.where(max_v == signal)[0] - pk_loc = pk_loc[signal[pk_loc] > min_v] - if N is not None: - order = np.argsort(-signal[pk_loc]) - pk_loc = pk_loc[order[:N]] - pk_loc = pk_loc[np.argsort(pk_loc)] - return pk_loc, signal[pk_loc] - - -def get_object_cor(depth, size, center_u, patch_num=256): - width_u = size[0, center_u] - height_v = size[1, center_u] - boundary_v = size[2, center_u] - - center_boundary_v = depth2uv(depth[center_u:center_u + 1])[0, 1] - center_bottom_v = center_boundary_v - boundary_v - center_top_v = center_bottom_v - height_v - - base_v = center_boundary_v - 0.5 - assert base_v > 0 - - center_u = pixel2uv(np.array([center_u]), w=patch_num, h=patch_num // 2, axis=0)[0] - - center_boundary_uv = np.array([center_u, center_boundary_v]) - center_bottom_uv = np.array([center_u, center_bottom_v]) - center_top_uv = np.array([center_u, center_top_v]) - - left_u = center_u - width_u / 2 - right_u = center_u + width_u / 2 - - left_u = 1 + left_u if left_u < 0 else left_u - right_u = right_u - 1 if right_u > 1 else right_u - - pixel_u = uv2pixel(np.array([left_u, right_u]), w=patch_num, h=patch_num // 2, axis=0) - left_pixel_u = pixel_u[0] - right_pixel_u = pixel_u[1] - - left_boundary_v = depth2uv(depth[left_pixel_u:left_pixel_u + 1])[0, 1] - right_boundary_v = depth2uv(depth[right_pixel_u:right_pixel_u + 1])[0, 1] - - left_boundary_uv = np.array([left_u, left_boundary_v]) - right_boundary_uv = np.array([right_u, right_boundary_v]) - - xyz = uv2xyz(np.array([left_boundary_uv, 
right_boundary_uv, center_boundary_uv])) - left_boundary_xyz = xyz[0] - right_boundary_xyz = xyz[1] - - # need align - center_boundary_xyz = xyz[2] - center_bottom_xyz = uv2xyz(np.array([center_bottom_uv]))[0] - center_top_xyz = uv2xyz(np.array([center_top_uv]))[0] - center_boundary_norm = np.linalg.norm(center_boundary_xyz[::2]) - center_bottom_norm = np.linalg.norm(center_bottom_xyz[::2]) - center_top_norm = np.linalg.norm(center_top_xyz[::2]) - center_bottom_xyz = center_bottom_xyz * center_boundary_norm / center_bottom_norm - center_top_xyz = center_top_xyz * center_boundary_norm / center_top_norm - - left_bottom_xyz = left_boundary_xyz.copy() - left_bottom_xyz[1] = center_bottom_xyz[1] - right_bottom_xyz = right_boundary_xyz.copy() - right_bottom_xyz[1] = center_bottom_xyz[1] - - left_top_xyz = left_boundary_xyz.copy() - left_top_xyz[1] = center_top_xyz[1] - right_top_xyz = right_boundary_xyz.copy() - right_top_xyz[1] = center_top_xyz[1] - - uv = xyz2uv(np.array([left_bottom_xyz, right_bottom_xyz, left_top_xyz, right_top_xyz])) - left_bottom_uv = uv[0] - right_bottom_uv = uv[1] - left_top_uv = uv[2] - right_top_uv = uv[3] - - return [left_bottom_uv, right_bottom_uv, left_top_uv, right_top_uv], \ - [left_bottom_xyz, right_bottom_xyz, left_top_xyz, right_top_xyz] - - -def layout2depth(boundaries: List[np.array], return_mask=False, show=False, camera_height=1.6): - """ - - :param camera_height: - :param boundaries: [[[u_f1, v_f2], [u_f2, v_f2],...], [[u_c1, v_c2], [u_c2, v_c2]]] - :param return_mask: - :param show: - :return: - """ - # code from HorizonNet: https://github.com/sunset1995/HorizonNet/blob/master/eval_general.py - - w = len(boundaries[0]) - h = w//2 - # Convert corners to per-column boundary first - # Up -pi/2, Down pi/2 - vf = uv2lonlat(boundaries[0]) - vc = uv2lonlat(boundaries[1]) - vc = vc[None, :, 1] # [1, w] - vf = vf[None, :, 1] # [1, w] - assert (vc > 0).sum() == 0 - assert (vf < 0).sum() == 0 - - # Per-pixel v coordinate (vertical angle) - vs = ((np.arange(h) + 0.5) / h - 0.5) * np.pi - vs = np.repeat(vs[:, None], w, axis=1) # [h, w] - - # Floor-plane to depth - floor_h = camera_height - floor_d = np.abs(floor_h / np.sin(vs)) - - # wall to camera distance on horizontal plane at cross camera center - cs = floor_h / np.tan(vf) - - # Ceiling-plane to depth - ceil_h = np.abs(cs * np.tan(vc)) # [1, w] - ceil_d = np.abs(ceil_h / np.sin(vs)) # [h, w] - - # Wall to depth - wall_d = np.abs(cs / np.cos(vs)) # [h, w] - - # Recover layout depth - floor_mask = (vs > vf) - ceil_mask = (vs < vc) - wall_mask = (~floor_mask) & (~ceil_mask) - depth = np.zeros([h, w], np.float32) # [h, w] - depth[floor_mask] = floor_d[floor_mask] - depth[ceil_mask] = ceil_d[ceil_mask] - depth[wall_mask] = wall_d[wall_mask] - - assert (depth == 0).sum() == 0 - if return_mask: - return depth, floor_mask, ceil_mask, wall_mask - if show: - import matplotlib.pyplot as plt - plt.imshow(depth) - plt.show() - return depth - - -def calc_rotation(corners: np.ndarray): - xz = uv2xyz(corners)[..., 0::2] - max_norm = -1 - max_v = None - for i in range(len(xz)): - p_c = xz[i] - p_n = xz[(i + 1) % len(xz)] - v_cn = p_n - p_c - v_norm = np.linalg.norm(v_cn) - if v_norm > max_norm: - max_norm = v_norm - max_v = v_cn - - # v<-----------|o - # | | | - # | ----|----z | - # | | | - # | x \|/ - # |------------u - # It is required that the vector be aligned on the x-axis, z equals y, and x is still x. 
- # In floorplan, x is displayed as the x-coordinate and z as the y-coordinate - rotation = np.arctan2(max_v[1], max_v[0]) - return rotation - - -if __name__ == '__main__': - corners = np.array([[0.2, 0.7], - [0.4, 0.7], - [0.3, 0.6], - [0.6, 0.6], - [0.8, 0.7]]) - get_heat_map(u=corners[..., 0], show=True, sigma=2, width=15) - pass - diff --git a/spaces/DragGan/DragGan-Inversion/gui_utils/imgui_window.py b/spaces/DragGan/DragGan-Inversion/gui_utils/imgui_window.py deleted file mode 100644 index 5937788f2e8e51772677ab12c67038f5ccd37b42..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/gui_utils/imgui_window.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os -import imgui -import imgui.integrations.glfw - -from . import glfw_window -from . import imgui_utils -from . import text_utils - -# ---------------------------------------------------------------------------- - - -class ImguiWindow(glfw_window.GlfwWindow): - def __init__(self, *, title='ImguiWindow', font=None, font_sizes=range(14, 24), **glfw_kwargs): - if font is None: - font = text_utils.get_default_font() - font_sizes = {int(size) for size in font_sizes} - super().__init__(title=title, **glfw_kwargs) - - # Init fields. - self._imgui_context = None - self._imgui_renderer = None - self._imgui_fonts = None - self._cur_font_size = max(font_sizes) - - # Delete leftover imgui.ini to avoid unexpected behavior. - if os.path.isfile('imgui.ini'): - os.remove('imgui.ini') - - # Init ImGui. - self._imgui_context = imgui.create_context() - self._imgui_renderer = _GlfwRenderer(self._glfw_window) - self._attach_glfw_callbacks() - # Disable creating imgui.ini at runtime. - imgui.get_io().ini_saving_rate = 0 - # Improve behavior with imgui_utils.drag_custom(). - imgui.get_io().mouse_drag_threshold = 0 - self._imgui_fonts = {size: imgui.get_io().fonts.add_font_from_file_ttf( - font, size) for size in font_sizes} - self._imgui_renderer.refresh_font_texture() - - def close(self): - self.make_context_current() - self._imgui_fonts = None - if self._imgui_renderer is not None: - self._imgui_renderer.shutdown() - self._imgui_renderer = None - if self._imgui_context is not None: - # imgui.destroy_context(self._imgui_context) # Commented out to avoid creating imgui.ini at the end. - self._imgui_context = None - super().close() - - def _glfw_key_callback(self, *args): - super()._glfw_key_callback(*args) - self._imgui_renderer.keyboard_callback(*args) - - @property - def font_size(self): - return self._cur_font_size - - @property - def spacing(self): - return round(self._cur_font_size * 0.4) - - def set_font_size(self, target): # Applied on next frame. - self._cur_font_size = min((abs(key - target), key) - for key in self._imgui_fonts.keys())[1] - - def begin_frame(self): - # Begin glfw frame. - super().begin_frame() - - # Process imgui events. - self._imgui_renderer.mouse_wheel_multiplier = self._cur_font_size / 10 - if self.content_width > 0 and self.content_height > 0: - self._imgui_renderer.process_inputs() - - # Begin imgui frame. 
- imgui.new_frame() - imgui.push_font(self._imgui_fonts[self._cur_font_size]) - imgui_utils.set_default_style( - spacing=self.spacing, indent=self.font_size, scrollbar=self.font_size+4) - - def end_frame(self): - imgui.pop_font() - imgui.render() - imgui.end_frame() - self._imgui_renderer.render(imgui.get_draw_data()) - super().end_frame() - -# ---------------------------------------------------------------------------- -# Wrapper class for GlfwRenderer to fix a mouse wheel bug on Linux. - - -class _GlfwRenderer(imgui.integrations.glfw.GlfwRenderer): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.mouse_wheel_multiplier = 1 - - def scroll_callback(self, window, x_offset, y_offset): - self.io.mouse_wheel += y_offset * self.mouse_wheel_multiplier - -# ---------------------------------------------------------------------------- diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/utils/util.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/utils/util.py deleted file mode 100644 index 544c94895dfc0bfcd1285fde7cd2c102b71113ed..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/stylegan_human/utils/util.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -import torch -import cv2 -from torchvision import transforms -import numpy as np -import math - - -def visual(output, out_path): - output = (output + 1)/2 - output = torch.clamp(output, 0, 1) - if output.shape[1] == 1: - output = torch.cat([output, output, output], 1) - output = output[0].detach().cpu().permute(1, 2, 0).numpy() - output = (output*255).astype(np.uint8) - output = output[:, :, ::-1] - cv2.imwrite(out_path, output) - - -def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05): - - lr_ramp = min(1, (1 - t) / rampdown) - lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi) - lr_ramp = lr_ramp * min(1, t / rampup) - return initial_lr * lr_ramp - - -def latent_noise(latent, strength): - noise = torch.randn_like(latent) * strength - - return latent + noise - - -def noise_regularize_(noises): - loss = 0 - - for noise in noises: - size = noise.shape[2] - - while True: - loss = ( - loss - + (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2) - + (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2) - ) - - if size <= 8: - break - - noise = noise.reshape([-1, 1, size // 2, 2, size // 2, 2]) - noise = noise.mean([3, 5]) - size //= 2 - - return loss - - -def noise_normalize_(noises): - for noise in noises: - mean = noise.mean() - std = noise.std() - - noise.data.add_(-mean).div_(std) - - -def tensor_to_numpy(x): - x = x[0].permute(1, 2, 0) - x = torch.clamp(x, -1, 1) - x = (x+1) * 127.5 - x = x.cpu().detach().numpy().astype(np.uint8) - return x - - -def numpy_to_tensor(x): - x = (x / 255 - 0.5) * 2 - x = torch.from_numpy(x).unsqueeze(0).permute(0, 3, 1, 2) - x = x.cuda().float() - return x - - -def tensor_to_pil(x): - x = torch.clamp(x, -1, 1) - x = (x+1) * 127.5 - return transforms.ToPILImage()(x.squeeze_(0)) diff --git a/spaces/Eddycrack864/Applio-Inference/train/losses.py b/spaces/Eddycrack864/Applio-Inference/train/losses.py deleted file mode 100644 index b89038f14d06d7fae43628183e9ffb465e4edafd..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/train/losses.py +++ /dev/null @@ -1,59 +0,0 @@ -import torch -from torch.nn import functional as F - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() 
- gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg**2) - loss += r_loss + g_loss - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/FedeFT/Head_Pose_Estimation_and_LAEO_computation/utils/my_utils.py b/spaces/FedeFT/Head_Pose_Estimation_and_LAEO_computation/utils/my_utils.py deleted file mode 100644 index 3ca21d557102ea9f8a811a699f2baea6858d8155..0000000000000000000000000000000000000000 --- a/spaces/FedeFT/Head_Pose_Estimation_and_LAEO_computation/utils/my_utils.py +++ /dev/null @@ -1,1375 +0,0 @@ -import numpy as np -from scipy.spatial import distance as dist -from utils.labels import pose_id_part, pose_id_part_openpose, rev_pose_id_part_openpose, rev_pose_id_part -import cv2 -import os -import json - - -def rescale_bb(boxes, pad, im_width, im_height): - """ - Modify in place the bounding box coordinates (percentage) to the new image width and height - - Args: - :boxes (numpy.ndarray): Array of bounding box coordinates expressed in percentage [y_min, x_min, y_max, x_max] - :pad (tuple): The first element represents the right padding (applied by resize_preserving_ar() function); - the second element represents the bottom padding (applied by resize_preserving_ar() function) and - the third element is a tuple that is the shape of the image after resizing without the padding (this is useful for - the coordinates changes) - :im_width (int): The new image width - :im_height (int): The new image height - - Returns: - """ - - right_padding = pad[0] - bottom_padding = pad[1] - - if bottom_padding != 0: - for box in boxes: - y_min, y_max = box[0] * im_height, box[2] * im_height # to pixels - box[0], box[2] = y_min / (im_height - pad[1]), y_max / (im_height - pad[1]) # back to percentage - - if right_padding != 0: - for box in boxes: - x_min, x_max = box[1] * im_width, box[3] * im_width # to pixels - box[1], box[3] = x_min / (im_width - pad[0]), x_max / (im_width - pad[0]) # back to percentage - - -def rescale_key_points(key_points, pad, im_width, im_height): - """ - Modify in place the bounding box coordinates (percentage) to the new image width and height - - Args: - :key_points (numpy.ndarray): Array of bounding box coordinates expressed in percentage [y_min, x_min, y_max, x_max] - :pad (tuple): The first element represents the right padding (applied by resize_preserving_ar() function); - the second element represents the bottom padding (applied by resize_preserving_ar() function) and - the third element is a tuple that is the shape of the image after resizing without the padding (this is useful for - the coordinates changes) - 
:im_width (int): The new image width - :im_height (int): The new image height - - Returns: - """ - - right_padding = pad[0] - bottom_padding = pad[1] - - if bottom_padding != 0: - for aux in key_points: - for point in aux: # x 1 y 0 - y = point[0] * im_height - point[0] = y / (im_height - pad[1]) - - if right_padding != 0: - for aux in key_points: - for point in aux: - x = point[1] * im_width - point[1] = x / (im_width - pad[0]) - - -def change_coordinates_aspect_ratio(aux_key_points_array, img_person, img_person_resized): - """ - - Args: - : - - Returns: - : - """ - - aux_key_points_array_ratio = [] - ratio_h, ratio_w = img_person.shape[0] / (img_person_resized.shape[1]), img_person.shape[1] / (img_person_resized.shape[2]) # shape 0 batch 1 - - for elem in aux_key_points_array: - aux = np.zeros(3) - aux[0] = int((elem[0]) * ratio_h) - aux[1] = int(elem[1] * ratio_h) - aux[2] = int(elem[2]) - aux_key_points_array_ratio.append(aux) - - aux_key_points_array_ratio = np.array(aux_key_points_array_ratio, dtype=int) - - return aux_key_points_array_ratio - - -def parse_output_pose(heatmaps, offsets, threshold): - """ - Parse the output pose (auxiliary function for tflite models) - Args: - : - - Returns: - : - """ - # - # heatmaps: 9x9x17 probability of appearance of each keypoint in the particular part of the image (9,9) -> used to locate position of the joints - # offsets: 9x9x34 used for calculation of the keypoint's position (first 17 x coords, the second 17 y coords) - # - joint_num = heatmaps.shape[-1] - pose_kps = np.zeros((joint_num, 3), np.uint32) - - for i in range(heatmaps.shape[-1]): - joint_heatmap = heatmaps[..., i] - max_val_pos = np.squeeze(np.argwhere(joint_heatmap == np.max(joint_heatmap))) - remap_pos = np.array(max_val_pos / 8 * 257, dtype=np.int32) - pose_kps[i, 0] = int(remap_pos[0] + offsets[max_val_pos[0], max_val_pos[1], i]) - pose_kps[i, 1] = int(remap_pos[1] + offsets[max_val_pos[0], max_val_pos[1], i + joint_num]) - max_prob = np.max(joint_heatmap) - - if max_prob > threshold: - if pose_kps[i, 0] < 257 and pose_kps[i, 1] < 257: - pose_kps[i, 2] = 1 - - return pose_kps - - -def retrieve_xyz_from_detection(points_list, point_cloud_img): - """ - Retrieve the xyz of the list of points passed as input (if we have the point cloud of the image) - Args: - :points_list (list): list of points for which we want to retrieve xyz information - :point_cloud_img (numpy.ndarray): numpy array containing XYZRGBA information of the image - - Returns: - :xyz (list): list of lists of 3D points with XYZ information (left camera origin (0,0,0)) - """ - - xyz = [[point_cloud_img[:, :, 0][point[1], point[0]], point_cloud_img[:, :, 1][point[1], point[0]], point_cloud_img[:, :, 2][point[1], point[0]]] - for point in points_list] - return xyz - - -def retrieve_xyz_pose_points(point_cloud_image, key_points_score, key_points): - """Retrieve the key points from the point cloud to get the XYZ position in the 3D space - - Args: - :point_cloud_image (numpy.ndarray): - :key_points_score (list): - :key_points (list): - - Returns: - :xyz_pose: a list of lists representing the XYZ 3D coordinates of each key point (j is the index number of the id pose) - """ - xyz_pose = [] - - for i in range(len(key_points_score)): - xyz_pose_aux = [] - for j in range(len(key_points_score[i])): - # if key_points_score[i][j] > threshold:# and j < 5: - x, y = int(key_points[i][j][0] * point_cloud_image.shape[0]) - 1, int(key_points[i][j][1] * point_cloud_image.shape[1]) - 1 - xyz_pose_aux.append([point_cloud_image[x, y, 0], 
point_cloud_image[x, y, 1], point_cloud_image[x, y, 2], key_points_score[i][j]]) - - xyz_pose.append(xyz_pose_aux) - return xyz_pose - - -def compute_distance(points_list, min_distance=1.5): - """ - Compute the distance between each point and find if there are points that are closer to each other that do not respect a certain distance - expressed in meter. - - Args: - :points_list (list): list of points expressed in xyz 3D coordinates (meters) - :min_distance (float): minimum threshold for distances (if the l2 distance between two objects is lower than this value it is considered a violation) - (default is 1.5) - - Returns: - :distance_matrix: matrix containing the distances between each points (diagonal 0) - :violate: set of points that violate the minimum distance threshold - :couple_points: list of lists of couple points that violate the min_distance threshold (to keep track of each couple) - """ - - if points_list is None or len(points_list) == 1 or len(points_list) == 0: - return None, None, None - else: # if there are more than two points - violate = set() - couple_points = [] - aux = np.array(points_list) - distance_matrix = dist.cdist(aux, aux, 'euclidean') - for i in range(0, distance_matrix.shape[0]): # loop over the upper triangular of the distance matrix - for j in range(i + 1, distance_matrix.shape[1]): - if distance_matrix[i, j] < min_distance: - # print("Distance between {} and {} is {:.2f} meters".format(i, j, distance_matrix[i, j])) - violate.add(i) - violate.add(j) - couple_points.append((i, j)) - - return distance_matrix, violate, couple_points - - -def initialize_video_recorder(output_path, output_depth_path, fps, shape): - """Initialize OpenCV video recorders that will be used to write each image/frame to a single video - - Args: - :output (str): The file location where the recorded video will be saved - :output_depth (str): The file location where the recorded video with depth information will be saved - :fps (int): The frame per seconds of the output videos - :shape (tuple): The dimension of the output video (width, height) - - Returns: - :writer (cv2.VideoWriter): The video writer used to save the video - :writer_depth (cv2.VideoWriter): The video writer used to save the video with depth information - """ - - if not os.path.isdir(os.path.split(output_path)[0]): - logger.error("Invalid path for the video writer; folder does not exist") - exit(1) - - fourcc = cv2.VideoWriter_fourcc(*"MJPG") - writer = cv2.VideoWriter(output_path, fourcc, fps, shape, True) - writer_depth = None - - if output_depth_path: - if not os.path.isdir(os.path.split(output_depth_path)[0]): - logger.error("Invalid path for the depth video writer; folder does not exist") - exit(1) - writer_depth = cv2.VideoWriter(output_depth_path, fourcc, fps, shape, True) - - return writer, writer_depth - - -def delete_items_from_array_aux(arr, i): - """ - Auxiliary function that delete the item at a certain index from a numpy array - - Args: - :arr (numpy.ndarray): Array of array where each element correspond to the four coordinates of bounding box expressed in percentage - :i (int): Index of the element to be deleted - - Returns: - :arr_ret: the array without the element at index i - """ - - aux = arr.tolist() - aux.pop(i) - arr_ret = np.array(aux) - return arr_ret - - -def fit_plane_least_square(xyz): - # find a plane that best fit xyz points using least squares - (rows, cols) = xyz.shape - g = np.ones((rows, 3)) - g[:, 0] = xyz[:, 0] # X - g[:, 1] = xyz[:, 1] # Y - z = xyz[:, 2] - (a, b, c), _, rank, s = 
np.linalg.lstsq(g, z, rcond=None) - - normal = (a, b, -1) - nn = np.linalg.norm(normal) - normal = normal / nn - point = np.array([0.0, 0.0, c]) - d = -point.dot(normal) - return d, normal, point - - -# -# def plot_plane(data, normal, d): -# from mpl_toolkits.mplot3d import Axes3D -# import matplotlib.pyplot as plt -# -# fig = plt.figure() -# ax = fig.gca(projection='3d') -# -# # plot fitted plane -# maxx = np.max(data[:, 0]) -# maxy = np.max(data[:, 1]) -# minx = np.min(data[:, 0]) -# miny = np.min(data[:, 1]) -# -# # compute needed points for plane plotting -# xx, yy = np.meshgrid([minx - 10, maxx + 10], [miny - 10, maxy + 10]) -# z = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2] -# -# # plot plane -# ax.plot_surface(xx, yy, z, alpha=0.2) -# -# ax.set_xlabel('x') -# ax.set_ylabel('y') -# ax.set_zlabel('z') -# plt.show() -# -# return - - -def shape_to_np(shape, dtype="int"): - """ - Function used for the dlib facial detector; it determine the facial landmarks for the face region, then convert the facial landmark - (x, y)-coordinates to a NumPy array - - Args: - :shape (): - :dtype (): - (Default is "int") - - Returns: - :coordinates (list): list of x, y coordinates - """ - # initialize the list of (x, y)-coordinates - coordinates = np.zeros((68, 2), dtype=dtype) - # loop over the 68 facial landmarks and convert them to a 2-tuple of (x, y)-coordinates - for i in range(0, 68): - coordinates[i] = (shape.part(i).x, shape.part(i).y) - # return the list of (x, y)-coordinates - return coordinates - - -def rect_to_bb(rect): - """ - Function used for the dlib facial detector; it converts dlib's rectangle to a tuple (x, y, w, h) where x and y represent xmin and ymin - coordinates while w and h represent the width and the height - - Args: - :rect (dlib.rectangle): dlib rectangle object that represents the region of the image where a face is detected - - Returns: - :res (tuple): tuple that represents the region of the image where a face is detected in the form x, y, w, h - """ - # take a bounding predicted by dlib and convert it to the format (x, y, w, h) as we would normally do with OpenCV - x = rect.left() - y = rect.top() - w = rect.right() - x - h = rect.bottom() - y - # return a tuple of (x, y, w, h) - res = x, y, w, h - return res - - -def enlarge_bb(y_min, x_min, y_max, x_max, im_width, im_height): - """ - Enlarge the bounding box to include more background margin (used for face detection) - - Args: - :y_min (int): the top y coordinate of the bounding box - :x_min (int): the left x coordinate of the bounding box - :y_max (int): the bottom y coordinate of the bounding box - :x_max (int): the right x coordinate of the bounding box - :im_width (int): The width of the image - :im_height (int): The height of the image - - Returns: - :y_min (int): the top y coordinate of the bounding box after enlarging - :x_min (int): the left x coordinate of the bounding box after enlarging - :y_max (int): the bottom y coordinate of the bounding box after enlarging - :x_max (int): the right x coordinate of the bounding box after enlarging - """ - - y_min = int(max(0, y_min - abs(y_min - y_max) / 10)) - y_max = int(min(im_height, y_max + abs(y_min - y_max) / 10)) - x_min = int(max(0, x_min - abs(x_min - x_max) / 5)) - x_max = int(min(im_width, x_max + abs(x_min - x_max) / 4)) # 5 - x_max = int(min(x_max, im_width)) - return y_min, x_min, y_max, x_max - - -def linear_assignment(cost_matrix): - try: - import lap - _, x, y = lap.lapjv(cost_matrix, extend_cost=True) - return np.array([[y[i], i] for i in x if 
i >= 0]) - except ImportError: - from scipy.optimize import linear_sum_assignment - x, y = linear_sum_assignment(cost_matrix) - return np.array(list(zip(x, y))) - - -def iou_batch(bb_test, bb_gt): - """ - From SORT: Computes IUO between two bboxes in the form [x1,y1,x2,y2] - - Args: - :bb_test (): - :bb_gt (): - - Returns: - - """ - # print(bb_test, bb_gt) - bb_gt = np.expand_dims(bb_gt, 0) - bb_test = np.expand_dims(bb_test, 1) - - xx1 = np.maximum(bb_test[..., 0], bb_gt[..., 0]) - yy1 = np.maximum(bb_test[..., 1], bb_gt[..., 1]) - xx2 = np.minimum(bb_test[..., 2], bb_gt[..., 2]) - yy2 = np.minimum(bb_test[..., 3], bb_gt[..., 3]) - w = np.maximum(0., xx2 - xx1) - h = np.maximum(0., yy2 - yy1) - wh = w * h - o = wh / ((bb_test[..., 2] - bb_test[..., 0]) * (bb_test[..., 3] - bb_test[..., 1]) + (bb_gt[..., 2] - bb_gt[..., 0]) * ( - bb_gt[..., 3] - bb_gt[..., 1]) - wh) - return o - - -def convert_bbox_to_z(bbox): - """ - Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form [x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is - the aspect ratio - - Args: - :bbox (): - - Returns: - - """ - w = bbox[2] - bbox[0] - h = bbox[3] - bbox[1] - x = bbox[0] + w / 2. - y = bbox[1] + h / 2. - s = w * h # scale is just area - r = w / float(h) if float(h) != 0 else w - return np.array([x, y, s, r]).reshape((4, 1)) - - -def convert_x_to_bbox(x, score=None): - """ - Takes a bounding box in the centre form [x,y,s,r] and returns it in the form - [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right - - Args: - :x (): - :score (): - (Default is None) - - Returns: - - """ - w = np.sqrt(x[2] * x[3]) - h = x[2] / w - if score is None: - return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2.]).reshape((1, 4)) - else: - return np.array([x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score]).reshape((1, 5)) - - -def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3): - """ - Assigns detections to tracked object (both represented as bounding boxes) - Returns 3 lists of matches, unmatched_detections and unmatched_trackers - - Args: - :detections (): - :trackers (): - :iou_threshold (): - (Default is 0.3) - - Returns: - - """ - if len(trackers) == 0: - return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int) - - iou_matrix = iou_batch(detections, trackers) - # print("IOU MATRIX: ", iou_matrix) - - if min(iou_matrix.shape) > 0: - a = (iou_matrix > iou_threshold).astype(np.int32) - if a.sum(1).max() == 1 and a.sum(0).max() == 1: - matched_indices = np.stack(np.where(a), axis=1) - else: - matched_indices = linear_assignment(-iou_matrix) - else: - matched_indices = np.empty(shape=(0, 2)) - - unmatched_detections = [] - for d, det in enumerate(detections): - if d not in matched_indices[:, 0]: - unmatched_detections.append(d) - unmatched_trackers = [] - for t, trk in enumerate(trackers): - if t not in matched_indices[:, 1]: - unmatched_trackers.append(t) - - # filter out matched with low IOU - matches = [] - for m in matched_indices: - if iou_matrix[m[0], m[1]] < iou_threshold: - unmatched_detections.append(m[0]) - unmatched_trackers.append(m[1]) - else: - matches.append(m.reshape(1, 2)) - if len(matches) == 0: - matches = np.empty((0, 2), dtype=int) - else: - matches = np.concatenate(matches, axis=0) - - return matches, np.array(unmatched_detections), np.array(unmatched_trackers) - - -def find_face_from_key_points(key_points, bboxes, image, person=None, openpose=False, 
gazefollow=True): - """ - - Args: - key_points: - bboxes: - image: - person: - openpose: - gazefollow: - - Returns: - - """ - - im_width, im_height = image.shape[1], image.shape[0] - - # key_points, bboxes = person.get_key_points()[-1], person.get_bboxes()[-1] - # print("PERSON ID:", person.get_id()) - - # 0 nose, 1/2 left/right eye, 3/4 left/right ear - # 5/6 leftShoulder/rightShoulder - # 7/8 leftElbow/rightElbow - # 9/10 leftWrist/rightWrist - # 11/12 leftHip/rightHip - # 13/14 leftKnee/rightKnee - # 15/16 leftAnkle/rightAnkle - # print(key_points) - - face_points = key_points[:7] - - if openpose: - face_points = [] - for point in key_points[:7]: - # print(point[2], type(point[2])) - if point[2] > 0.0: - face_points.append(point) - # print("face1", face_points) - - if len(face_points) == 0: - return None, [] - - # print("bboxe", bboxes, face_points) - if not gazefollow: - ct = compute_centroid(face_points) - - x_min, y_min = ct[0] - 10, ct[1] - 15 - x_max, y_max = ct[0] + 10, ct[1] + 10 - - y_min_bbox = y_min - - elif gazefollow: - # [l_shoulder, r_shoulder] = key_points[5:] - # print(l_shoulder, r_shoulder) - print("FACE", face_points) - if len(face_points) == 1: - return None, [] - - x_min, y_min, _ = np.amin(face_points, axis=0) - x_max, y_max, _ = np.amax(face_points, axis=0) - - # aux_diff = - # print("X: ", aux_diff) - # if aux_diff < 20: - # x_max += 20 - # x_min -= 20 - - aux_diff = y_max - y_min - print("y: ", aux_diff) - if aux_diff < 50: # rapporto xmax -xmin o altro - y_max += (x_max - x_min) / 1.4 - y_min -= (x_max - x_min) / 1.2 - # x_min -= 10 - # x_max += 10 - - y_min_bbox = int(y_min) # int(bboxes[1]) if bboxes is not None else y_min - (x_max-x_min) - # if bboxes is None: - # y_max = y_max + (x_max-x_min) - - y_min, x_min, y_max, x_max = enlarge_bb(y_min_bbox, x_min, y_max, x_max, im_width, im_height) - # print(y_min, x_min, y_max, x_max, y_max - y_min, x_max - x_min) - # if -1 < y_max - y_min < 5 and -1 < x_max - x_min < 5: # due punti uguali - # # print("AAAAA") - # return None, [] - - face_image = image[y_min:y_max, x_min:x_max] - - if person is not None: - # person.print_() - person.update_faces(face_image) - person.update_faces_coordinates([y_min, x_min, y_max, x_max]) - # person.update_faces_key_points(face_points) - # person.print_() - return None - else: - return face_image, [y_min, x_min, y_max, x_max] - - -def compute_interaction_cosine(head_position, target_position, gaze_direction): - """ - Computes the interaction between two people using the angle of view. - The interaction in measured as the cosine of the angle formed by the line from person A to B and the gaze direction of person A. 
- - Args: - :head_position (list): list of pixel coordinates [x, y] that represents the position of the head of person A - :target_position (list): list of pixel coordinates [x, y] that represents the position of head of person B - :gaze_direction (list): list that represents the gaze direction of the head of person A in the form [gx, gy] - - Returns: - :val (float): value that describe the quantity of interaction - """ - - if head_position == target_position: - return 0 # or -1 - else: - # direction from observer to target - direction = np.arctan2((target_position[1] - head_position[1]), (target_position[0] - head_position[0])) - direction_gaze = np.arctan2(gaze_direction[1], gaze_direction[0]) - difference = direction - direction_gaze - - # difference of the line joining observer -> target with the gazing direction, - val = np.cos(difference) - if val < 0: - return 0 - else: - return val - - -def compute_attention_from_vectors(list_objects): - """ - - Args: - :list_objects (): - - Returns: - - """ - - dict_person = dict() - id_list = [] - for obj in list_objects: - if len(obj.get_key_points()) > 0: - # print("Object ID: ", obj.get_id(), "x: ", obj.get_poses_vector_norm()[-1][0], "y: ", obj.get_poses_vector_norm()[-1][1]) - id_list.append(obj.get_id()) - - # print("kpts: ", obj.get_key_points()[-1]) - aux = [obj.get_key_points()[-1][j][:2] for j in [0, 2, 1, 4, 3]] - dict_person[obj.get_id()] = [obj.get_poses_vector_norm()[-1], np.mean(aux, axis=0).tolist()] - - attention_matrix = np.zeros((len(dict_person), len(dict_person)), dtype=np.float32) - - for i in range(attention_matrix.shape[0]): - for j in range(attention_matrix.shape[1]): - if i == j: - continue - attention_matrix[i][j] = compute_interaction_cosine(dict_person[i][1], dict_person[j][1], dict_person[i][0]) - - return attention_matrix.tolist(), id_list - - -def compute_attention_ypr(list_objects): - """ - - Args: - :list_objects (): - - Returns: - : - """ - - for obj in list_objects: - if len(obj.get_key_points()) > 0: - print("Object ID: ", obj.get_id(), "yaw: ", obj.get_poses_ypr()[-1][0], "pitch: ", obj.get_poses_ypr()[-1][1], "roll: ", - obj.get_poses_ypr()[-1][2]) - - -def save_key_points_to_json(ids, kpts, path_json, openpose=False): - """ - Save key points to .json format according to Openpose output format - - Args: - :kpts (): - :path_json (): - - Returns: - """ - - # print(path_json) - dict_file = {"version": 1.3} - list_dict_person = [] - for j in range(len(kpts)): - dict_person = {"person_id": [int(ids[j])], - "face_keypoints_2d": [], - "hand_left_keypoints_2d": [], - "hand_right_keypoints_2d": [], - "pose_keypoints_3d": [], - "face_keypoints_3d": [], - "hand_left_keypoints_3d": [], - "hand_right_keypoints_3d": []} - - kpts_openpose = np.zeros((25, 3)) - for i, point in enumerate(kpts[j]): - if openpose: - idx_op = rev_pose_id_part_openpose[pose_id_part_openpose[i]] - else: - idx_op = rev_pose_id_part_openpose[pose_id_part[i]] - # print(idx_op, point[1], point[0], point[2]) - kpts_openpose[idx_op] = [point[1], point[0], point[2]] # x, y, conf - - list_kpts_openpose = list(np.concatenate(kpts_openpose).ravel()) - dict_person["pose_keypoints_2d"] = list_kpts_openpose - # print(dict_person) - list_dict_person.append(dict_person) - - dict_file["people"] = list_dict_person - - # Serializing json - json_object = json.dumps(dict_file, indent=4) - - # Writing to sample.json - with open(path_json, "w") as outfile: - outfile.write(json_object) - - -def json_to_poses(json_data): - """ - - Args: - :js_data (): - - Returns: - 
:res (): - """ - poses = [] - confidences = [] - ids = [] - - for arr in json_data["people"]: - ids.append(arr["person_id"]) - confidences.append(arr["pose_keypoints_2d"][2::3]) - aux = arr["pose_keypoints_2d"][2::3] - arr = np.delete(arr["pose_keypoints_2d"], slice(2, None, 3)) - # print("B", list(zip(arr[::2], arr[1::2]))) - poses.append(list(zip(arr[::2], arr[1::2], aux))) - - return poses, confidences, ids - - -def parse_json1(aux): - # print(aux['people']) - list_kpts = [] - id_list = [] - for person in aux['people']: - # print(len(person['pose_keypoints_2d'])) - aux = person['pose_keypoints_2d'] - aux_kpts = [[aux[i+1], aux[i], aux[i+2]] for i in range(0, 75, 3)] - # print(len(aux_kpts)) - list_kpts.append(aux_kpts) - id_list.append(person['person_id']) - - # print(list_kpts) - return list_kpts, id_list - - -def load_poses_from_json1(json_filename): - """ - - Args: - :json_filename (): - - Returns: - :poses, conf: - """ - with open(json_filename) as data_file: - loaded = json.load(data_file) - zz = parse_json1(loaded) - return zz - - -def load_poses_from_json(json_filename): - """ - - Args: - :json_filename (): - - Returns: - :poses, conf: - """ - with open(json_filename) as data_file: - loaded = json.load(data_file) - poses, conf, ids = json_to_poses(loaded) - - if len(poses) < 1: # != 1: - return None, None, None - else: - return poses, conf, ids - - -def compute_head_features(img, pose, conf, open_pose=True): - """ - - Args: - img: - pose: - conf: - open_pose: - - Returns: - - """ - - joints = [0, 15, 16, 17, 18] if open_pose else [0, 2, 1, 4, 3] - - n_joints_set = [pose[joint] for joint in joints if joint_set(pose[joint])] # if open_pose else pose - - if len(n_joints_set) < 1: - return None, None - - centroid = compute_centroid(n_joints_set) - - # for j in n_joints_set: - # print(j, centroid) - max_dist = max([dist_2D([j[0], j[1]], centroid) for j in n_joints_set]) - - new_repr = [(np.array([pose[joint][0], pose[joint][1]]) - np.array(centroid)) for joint in joints] if open_pose else [ - (np.array(pose[i]) - np.array(centroid)) for i in range(len(n_joints_set))] - result = [] - - for i in range(0, 5): - - if joint_set(pose[joints[i]]): - if max_dist != 0.0: - result.append([new_repr[i][0] / max_dist, new_repr[i][1] / max_dist]) - else: - result.append([new_repr[i][0], new_repr[i][1]]) - else: - result.append([0, 0]) - - flat_list = [item for sublist in result for item in sublist] - - conf_list = [] - - for j in joints: - conf_list.append(conf[j]) - - return flat_list, conf_list, centroid - - -def compute_body_features(pose, conf): - """ - - Args: - pose: - conf: - - Returns: - - """ - joints = [0, 15, 16, 17, 18] - alljoints = range(0, 25) - - n_joints_set = [pose[joint] for joint in joints if joint_set(pose[joint])] - - if len(n_joints_set) < 1: - return None, None - - centroid = compute_centroid(n_joints_set) - - n_joints_set = [pose[joint] for joint in alljoints if joint_set(pose[joint])] - - max_dist = max([dist_2D(j, centroid) for j in n_joints_set]) - - new_repr = [(np.array(pose[joint]) - np.array(centroid)) for joint in alljoints] - - result = [] - - for i in range(0, 25): - - if joint_set(pose[i]): - result.append([new_repr[i][0] / max_dist, new_repr[i][1] / max_dist]) - else: - result.append([0, 0]) - - flat_list = [item for sublist in result for item in sublist] - - for j in alljoints: - flat_list.append(conf[j]) - - return flat_list, centroid - - -def compute_centroid(points): - """ - - Args: - points: - - Returns: - - """ - x, y = [], [] - for point in points: - if 
len(point) == 3: - if point[2] > 0.0: - x.append(point[0]) - y.append(point[1]) - else: - x.append(point[0]) - y.append(point[1]) - - # print(x, y) - if x == [] or y == []: - return [None, None] - mean_x = np.mean(x) - mean_y = np.mean(y) - - return [mean_x, mean_y] - - -def joint_set(p): - """ - - Args: - p: - - Returns: - - """ - return p[0] != 0.0 or p[1] != 0.0 - - -def dist_2D(p1, p2): - """ - - Args: - p1: - p2: - - Returns: - - """ - # print(p1) - # print(p2) - - p1 = np.array(p1) - p2 = np.array(p2) - - squared_dist = np.sum((p1 - p2) ** 2, axis=0) - return np.sqrt(squared_dist) - - -def compute_head_centroid(pose): - """ - - Args: - pose: - - Returns: - - """ - joints = [0, 15, 16, 17, 18] - - n_joints_set = [pose[joint] for joint in joints if joint_set(pose[joint])] - - # if len(n_joints_set) < 2: - # return None - - centroid = compute_centroid(n_joints_set) - - return centroid - - -def head_direction_to_json(path_json, norm_list, unc_list, ids_list, file_name): - - dict_file = {} - list_dict_person = [] - for k, i in enumerate(norm_list): - dict_person = {"id_person": [ids_list[k]], - "norm_xy": [i[0][0].item(), i[0][1].item()], # from numpy to native python type for json serilization - "center_xy": [int(i[1][0]), int(i[1][1])], - "uncertainty": [unc_list[k].item()]} - - list_dict_person.append(dict_person) - dict_file["people"] = list_dict_person - - json_object = json.dumps(dict_file, indent=4) - - with open(path_json, "w") as outfile: - outfile.write(json_object) - - -def ypr_to_json(path_json, yaw_list, pitch_list, roll_list, yaw_u_list, pitch_u_list, roll_u_list, ids_list, center_xy): - - dict_file = {} - list_dict_person = [] - for k in range(len(yaw_list)): - dict_person = {"id_person": [ids_list[k]], - "yaw": [yaw_list[k].item()], - "yaw_u": [yaw_u_list[k].item()], - "pitch": [pitch_list[k].item()], - "pitch_u": [pitch_u_list[k].item()], - "roll": [roll_list[k].item()], - "roll_u": [roll_u_list[k].item()], - "center_xy": [int(center_xy[k][0]), int(center_xy[k][1])]} - - list_dict_person.append(dict_person) - dict_file["people"] = list_dict_person - - json_object = json.dumps(dict_file, indent=4) - - with open(path_json, "w") as outfile: - outfile.write(json_object) - # exit() - - -def save_keypoints_image(img, poses, suffix_, path_save=''): - """ - Save the image with the key points drawn on it - Args: - img: - poses: - suffix_: - - Returns: - - """ - aux = img.copy() - for point in poses: - for i, p in enumerate(point): - if i in [0, 15, 16, 17, 18]: - cv2.circle(aux, (int(p[0]), int(p[1])), 2, (0, 255, 0), 2) - - cv2.imwrite(os.path.join(path_save, suffix_ + '.jpg'), aux) - - -def unit_vector(vector): - """ - Returns the unit vector of the vector. 
- - Args: - vector: - - Returns: - - """ - return vector / np.linalg.norm(vector) - - -def angle_between(v1, v2): - """ - Returns the angle in radians between vectors 'v1' and 'v2':: - - angle_between((1, 0, 0), (0, 1, 0)) - 1.5707963267948966 - angle_between((1, 0, 0), (1, 0, 0)) - 0.0 - angle_between((1, 0, 0), (-1, 0, 0)) - 3.141592653589793 - """ - # if not unit vector - v1_u = unit_vector(tuple(v1)) - v2_u = unit_vector(tuple(v2)) - angle = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) - return angle if angle < 1.80 else angle - 1.80 - - -def centroid_constraint(centroid, centroid_det, gazefollow=False): # x y - """ - - Args: - centroid: - centroid_det: - - Returns: - - """ - if centroid_det == [None, None]: - return False - - if gazefollow == False: - if 0 < centroid_det[0] < 143 and 0 < centroid_det[1] < 24: # centroid in the overprinted text of hour in the video - return False - if 0 < centroid_det[1] < 4: - return False - if centroid[0] - 3 < centroid_det[0] < centroid[0] + 3 and centroid[1] - 3 < centroid_det[1] < centroid[ - 1] + 3: # detected centroid near the gt centroid - return True - else: - return False - else: - if int(centroid[0] - 30) < int(centroid_det[0]) < int(centroid[0] + 30) and int(centroid[1] - 30) < int(centroid_det[1]) < int( - centroid[1] + 30): # detected centroid near the gt centroid - return True - else: - return False - - -def initialize_video_reader(path_video): - """ - - Args: - path_video: - - Returns: - - """ - cap = cv2.VideoCapture(path_video) - if cap is None or not cap.isOpened(): - print('Warning: unable to open video source: ', path_video) - exit(-1) - return cap - - -def distance_skeletons(kpts1, kpts2, dst_type): - """ - Function to compute the distance between skeletons - #TO DO - Args: - kpts1: - kpts2: - dts_type: - - Returns: - - """ - if len(kpts1) != len(kpts2): - print("Error: Different notation used for keypoints") - exit(-1) - - print(len(kpts1), len(kpts2)) - # to openpose notations - if len(kpts1) == len(kpts2) == 17: - kpts1, kpts2 = kpt_centernet_to_openpose(kpts1), kpt_centernet_to_openpose(kpts2) - print(len(kpts1), len(kpts2)) - - if len(kpts1) != 25 or len(kpts2) != 25: - print("Error") - exit(-1) - - res_dist = 0 - - if dst_type == 'all_points': - for i, _ in enumerate(kpts1): - res_dist += dist_2D(kpts1[i][:2], kpts2[i][:2]) - res_dist /= 25 - return res_dist - - elif dst_type == 'head_centroid': - top1_c, top2_c = compute_head_centroid(kpts1), compute_head_centroid(kpts2) - if top1_c == [None, None] or top2_c == [None, None]: - res_dist = 900 - else: - res_dist = dist_2D(top1_c[:2], top2_c[:2]) - return res_dist - - elif dst_type == 'three_centroids': - #TO DO - # top1_c, top2_c = compute_centroid(kpts1[0, 15, 16, 17, 18]), compute_centroid(kpts2[0, 15, 16, 17, 18]) - # mid1_c, mid2_c = compute_centroid(kpts1[2, 5, 9, 12]), compute_centroid(kpts2[2, 5, 9, 12]) - # btm1_c, btm2_c = compute_centroid(kpts1[9, 12, 10, 13]), compute_centroid(kpts2[9, 12, 10, 13]) - # res_dist = dist_2D(top1_c[:2], top2_c[:2]) + dist_2D(mid1_c[:2], mid2_c[:2]) + dist_2D(btm1_c[:2], btm2_c[:2]) - # res_dist /= 3 - # return res_dist - return None - - elif dst_type == '': - print("dst_typ not valid") - exit(-1) - - -def kpt_openpose_to_centernet(kpts): - """ - - Args: - kpts: - - Returns: - - """ - #TO TEST - kpts_openpose = np.zeros((16, 3)) - for i, point in enumerate(kpts): - idx_op = rev_pose_id_part[pose_id_part_openpose[i]] - kpts_openpose[idx_op] = [point[0], point[1], point[2]] - - return kpts_openpose - - -def 
kpt_centernet_to_openpose(kpts): - """ - - Args: - kpts: - - Returns: - - """ - #TO TEST - kpts_openpose = np.zeros((25, 3)) - for i, point in enumerate(kpts): - idx_op = rev_pose_id_part_openpose[pose_id_part[i]] - kpts_openpose[idx_op] = [point[1], point[0], point[2]] - - return kpts_openpose - - -def non_maxima_aux(det, kpt, threshold=15): # threshold in pxels - # print("A", kpt, "\n", len(kpt)) - - indexes_to_delete = [] - - if len(kpt) == 0 or len(det) == 0: - return [], [] - - if len(kpt) == 1 or len(det) == 1: - return det, kpt - - kpt_res = kpt.copy() - det_res_aux = det.copy() - - for i in range(0, len(kpt)): - for j in range(i, len(kpt)): - if i == j: - continue - dist = distance_skeletons(kpt[i], kpt[j], 'head_centroid') - # print("DIST", i, j, dist) - if dist < threshold: - if j not in indexes_to_delete: - indexes_to_delete.append(j) - # kpt_res.pop(j) - det_res = [] - - # print(indexes_to_delete) - indexes_to_delete = sorted(indexes_to_delete, reverse=True) - # print(len(kpt_res)) - for index in indexes_to_delete: - kpt_res.pop(index) - - det_res_aux = list(np.delete(det_res_aux, indexes_to_delete, axis=0)) - det_res = np.array(det_res_aux) - - return det_res, kpt_res - - -def compute_centroid_list(points): - """ - - Args: - points: - - Returns: - - """ - x, y = [], [] - for i in range(0, len(points), 3): - if points[i + 2] > 0.0: # confidence openpose - x.append(points[i]) - y.append(points[i + 1]) - - if x == [] or y == []: - return [None, None] - mean_x = np.mean(x) - mean_y = np.mean(y) - - return [mean_x, mean_y] - - -def normalize_wrt_maximum_distance_point(points, file_name=''): - centroid = compute_centroid_list(points) - # centroid = [points[0], points[1]] - # print(centroid) - # exit() - - max_dist_x, max_dist_y = 0, 0 - for i in range(0, len(points), 3): - if points[i + 2] > 0.0: # confidence openpose take only valid keypoints (if not detected (0, 0, 0) - distance_x = abs(points[i] - centroid[0]) - distance_y = abs(points[i+1] - centroid[1]) - # dist_aux.append(distance) - if distance_x > max_dist_x: - max_dist_x = distance_x - if distance_y > max_dist_y: - max_dist_y = distance_y - elif points[i + 2] == 0.0: # check for centernet people on borders with confidence 0 - points[i] = 0 - points[i+1] = 0 - - for i in range(0, len(points), 3): - if points[i + 2] > 0.0: - if max_dist_x != 0.0: - points[i] = (points[i] - centroid[0]) / max_dist_x - if max_dist_y != 0.0: - points[i + 1] = (points[i + 1] - centroid[1]) / max_dist_y - if max_dist_x == 0.0: # only one point valid with some confidence value so it become (0,0, confidence) - points[i] = 0.0 - if max_dist_y == 0.0: - points[i + 1] = 0.0 - - return points - - -def retrieve_interest_points(kpts, detector): - """ - - :param kpts: - :return: - """ - res_kpts = [] - - if detector == 'centernet': - face_points = [0, 1, 2, 3, 4] - for index in face_points: - res_kpts.append(kpts[index][1]) - res_kpts.append(kpts[index][0]) - res_kpts.append(kpts[index][2]) - elif detector== 'zedcam': - face_points = [0, 14, 15, 16, 17] - for index in face_points: - res_kpts.append(kpts[index][0]) - res_kpts.append(kpts[index][1]) - res_kpts.append(kpts[index][2]) - else: - # take only interest points (5 points of face) - face_points = [0, 16, 15, 18, 17] - for index in face_points: - res_kpts.append(kpts[index][0]) - res_kpts.append(kpts[index][1]) - res_kpts.append(kpts[index][2]) - - - - return res_kpts - -def create_bbox_from_openpose_keypoints(data): - # from labels import pose_id_part_openpose - bbox = list() - ids = list() - kpt = 
list() - kpt_scores = list() - for person in data['people']: - ids.append(person['person_id'][0]) - kpt_temp = list() - kpt_score_temp = list() - # create bbox with min max each dimension - x, y = [], [] - for i in pose_id_part_openpose: - if i < 25: - # kpt and kpts scores - kpt_temp.append([int(person['pose_keypoints_2d'][i * 3]), int(person['pose_keypoints_2d'][(i * 3) + 1]), - person['pose_keypoints_2d'][(i * 3) + 2]]) - kpt_score_temp.append(person['pose_keypoints_2d'][(i * 3) + 2]) - # check confidence != 0 - if person['pose_keypoints_2d'][(3 * i) + 2]!=0: - x.append(int(person['pose_keypoints_2d'][3 * i])) - y.append(int(person['pose_keypoints_2d'][(3 * i) + 1])) - kpt_scores.append(kpt_score_temp) - kpt.append(kpt_temp) - xmax = max(x) - xmin = min(x) - ymax = max(y) - ymin = min(y) - bbox.append([xmin, ymin, xmax, ymax, 1]) # last value is for compatibility of centernet - - return bbox, kpt, kpt_scores # not to use scores - -def atoi(text): - return int(text) if text.isdigit() else text - - -def natural_keys(text): - """ - alist.sort(key=natural_keys) sorts in human order - http://nedbatchelder.com/blog/200712/human_sorting.html - (See Toothy's implementation in the comments) - """ - import re - return [atoi(c) for c in re.split(r'(\d+)', text)] \ No newline at end of file diff --git "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" "b/spaces/Fengbinbin/gpt-academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" deleted file mode 100644 index f1fe20171cc54aec0c79f4961e71b57845f252d5..0000000000000000000000000000000000000000 --- "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\346\200\273\347\273\223word\346\226\207\346\241\243.py" +++ /dev/null @@ -1,127 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -fast_debug = False - - -def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, os - # pip install python-docx 用于docx格式,跨平台 - # pip install pywin32 用于doc格式,仅支持Win平台 - for index, fp in enumerate(file_manifest): - if fp.split(".")[-1] == "docx": - from docx import Document - doc = Document(fp) - file_content = "\n".join([para.text for para in doc.paragraphs]) - else: - import win32com.client - word = win32com.client.Dispatch("Word.Application") - word.visible = False - # 打开文件 - print('fp', os.getcwd()) - doc = word.Documents.Open(os.getcwd() + '/' + fp) - # file_content = doc.Content.Text - doc = word.ActiveDocument - file_content = doc.Range().Text - doc.Close() - word.Quit() - - print(file_content) - # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名 - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - from request_llm.bridge_all import model_info - max_token = model_info[llm_kwargs['llm_model']]['max_token'] - TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4 - paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf( - txt=file_content, - get_token_fn=model_info[llm_kwargs['llm_model']]['token_cnt'], - limit=TOKEN_LIMIT_PER_FRAGMENT - ) - this_paper_history = [] - for i, paper_frag in enumerate(paper_fragments): - i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```' - i_say_show_user = f'请对下面的文章片段做概述: {os.path.abspath(fp)}的第{i+1}/{len(paper_fragments)}个片段。' - gpt_say = yield from 
request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.extend([i_say_show_user,gpt_say]) - this_paper_history.extend([i_say_show_user,gpt_say]) - - # 已经对该文章的所有片段总结完毕,如果文章被切分了, - if len(paper_fragments) > 1: - i_say = f"根据以上的对话,总结文章{os.path.abspath(fp)}的主要内容。" - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=this_paper_history, - sys_prompt="总结文章。" - ) - - history.extend([i_say,gpt_say]) - this_paper_history.extend([i_say,gpt_say]) - - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - res = write_results_to_file(history) - chatbot.append(("所有文件都总结完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -@CatchException -def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结Word文档。函数插件贡献者: JasonGuo1"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - from docx import Document - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 检测输入参数,如没有给定输入参数,直接退出 - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 搜索需要处理的文件清单 - if txt.endswith('.docx') or txt.endswith('.doc'): - file_manifest = [txt] - else: - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)] - - # 如果没找到任何文件 - if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 开始正式执行任务 - yield from 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) diff --git a/spaces/Fr33d0m21/stabilityai-stable-diffusion-2-1/app.py b/spaces/Fr33d0m21/stabilityai-stable-diffusion-2-1/app.py deleted file mode 100644 index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000 --- a/spaces/Fr33d0m21/stabilityai-stable-diffusion-2-1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch() \ No newline at end of file diff --git a/spaces/FridaZuley/RVC_HFKawaii/diffq/utils.py b/spaces/FridaZuley/RVC_HFKawaii/diffq/utils.py deleted file mode 100644 index be6ab5253c38564140bc202077292bb99f9f397b..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/diffq/utils.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
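(Editorial aside: the `simple_repr` helper defined just below in this `diffq/utils.py` builds a constructor-style repr from an object's signature, showing only the attributes that differ from their defaults. A minimal usage sketch follows; the import path and the demo class are assumptions for illustration, not part of the deleted file.)

```python
# Hedged usage sketch for diffq.utils.simple_repr (import path assumed from the
# package layout above); DemoLayer is a made-up class for illustration only.
from diffq.utils import simple_repr


class DemoLayer:
    def __init__(self, threshold: float = 0.5, groups: int = 4):
        self.threshold = threshold
        self.groups = groups

    def __repr__(self):
        return simple_repr(self)


print(DemoLayer(threshold=0.1))  # -> DemoLayer(threshold=0.1); default-valued args are omitted
```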
- -import inspect -from typing import Optional, List - - -def simple_repr(obj, attrs: Optional[List[str]] = None, overrides={}): - """ - Return a simple representation string for `obj`. - If `attrs` is not None, it should be a list of attributes to include. - """ - params = inspect.signature(obj.__class__).parameters - attrs_repr = [] - if attrs is None: - attrs = params.keys() - for attr in attrs: - display = False - if attr in overrides: - value = overrides[attr] - elif hasattr(obj, attr): - value = getattr(obj, attr) - else: - continue - if attr in params: - param = params[attr] - if param.default is inspect._empty or value != param.default: - display = True - else: - display = True - - if display: - attrs_repr.append(f"{attr}={value}") - return f"{obj.__class__.__name__}({','.join(attrs_repr)})" diff --git a/spaces/GAURAVBRAR/AIGK/README.md b/spaces/GAURAVBRAR/AIGK/README.md deleted file mode 100644 index d7596128447263d958abb85ab6304adb6e225dbc..0000000000000000000000000000000000000000 --- a/spaces/GAURAVBRAR/AIGK/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AIGK -emoji: 🏃 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 4.1.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GaenKoki/voicevox/get_cost_candidates.py b/spaces/GaenKoki/voicevox/get_cost_candidates.py deleted file mode 100644 index 072c4b4d57a757c957a0a1e9ab0afb0c5c989cb0..0000000000000000000000000000000000000000 --- a/spaces/GaenKoki/voicevox/get_cost_candidates.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -voicevox_engine/part_of_speech_data.pyのcost_candidatesを計算するプログラムです。 -引数のnaist_jdic_pathには、open_jtalkのsrc/mecab-naist-jdic/naist-jdic.csvを指定してください。 - -実行例: -python get_cost_candidates.py --naist_jdic_path=/path/to/naist-jdic.csv \ - --pos=名詞 \ - --pos_detail_1=固有名詞 \ - --pos_detail_2=一般 \ - --pos_detail_3=* - -cost_candidatesの値の詳細は以下の通りです。 -- 1番目の値はnaist_jdic内の同一品詞の最小コストから1を引いたもの、11番目の値は最大コストに1を足したものです。 -- 2番目の値はnaist_jdic内の同一品詞のコストの下位1%、10番目の値は99%の値です。 -- 6番目の値はnaist_jdic内の同一品詞のコストの最頻値です。 -- 2番目から6番目、6番目から10番目までの値は一定割合で増加するようになっています。 -""" - -import argparse -import statistics -from pathlib import Path -from typing import List - -import numpy as np - - -def get_candidates( - naist_jdic_path: Path, - pos: str, - pos_detail_1: str, - pos_detail_2: str, - pos_detail_3: str, -) -> List[int]: - costs = [] - with naist_jdic_path.open(encoding="utf-8") as f: - for line in f: - ( - _, - _, - _, - _cost, - _pos, - _pos_detail_1, - _pos_detail_2, - _pos_detail_3, - _, - _, - _, - _, - _, - _, - _, - ) = line.split(",") - if (_pos, _pos_detail_1, _pos_detail_2, _pos_detail_3) == ( - pos, - pos_detail_1, - pos_detail_2, - pos_detail_3, - ): - costs.append(int(_cost)) - assert len(costs) > 0 - cost_min = min(costs) - 1 - cost_1per = np.quantile(costs, 0.01).astype(np.int64) - cost_mode = statistics.mode(costs) - cost_99per = np.quantile(costs, 0.99).astype(np.int64) - cost_max = max(costs) + 1 - return ( - [cost_min] - + [int(cost_1per + (cost_mode - cost_1per) * i / 4) for i in range(5)] - + [int(cost_mode + (cost_99per - cost_mode) * i / 4) for i in range(1, 5)] - + [cost_max] - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--naist_jdic_path", type=Path) - parser.add_argument("--pos", type=str) - parser.add_argument("--pos_detail_1", type=str) - parser.add_argument("--pos_detail_2", type=str) - parser.add_argument("--pos_detail_3", type=str) - args = 
parser.parse_args() - print( - get_candidates( - naist_jdic_path=args.naist_jdic_path, - pos=args.pos, - pos_detail_1=args.pos_detail_1, - pos_detail_2=args.pos_detail_2, - pos_detail_3=args.pos_detail_3, - ) - ) diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_cylinder_ball_match.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_cylinder_ball_match.py deleted file mode 100644 index c68700bfe287370c2b31f3ccbbbafc1370ab92f9..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_cylinder_ball_match.py +++ /dev/null @@ -1,62 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils -import pybullet as p - -class ColorCoordinatedCylinderBallMatch(Task): - """Pick up each ball and place it on top of the cylinder of the same color.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.lang_template = "place the {color} ball on the {color} cylinder" - self.task_completed_desc = "done placing balls on cylinders." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add cylinders. - # x, y, z dimensions for the asset size - cylinder_size = (0.04, 0.04, 0.1) - cylinder_urdf = 'cylinder/cylinder-template.urdf' - cylinder_colors = ['red', 'blue', 'green', 'yellow'] - cylinder_poses = [] - cylinders = [] - for color in cylinder_colors: - cylinder_pose = self.get_random_pose(env, cylinder_size) - cylinder_id = env.add_object(cylinder_urdf, cylinder_pose, color=color) - cylinder_poses.append(cylinder_pose) - cylinders.append(cylinder_id) - - # Add balls. - # x, y, z dimensions for the asset size - ball_size = (0.04, 0.04, 0.04) - ball_urdf = 'ball/ball-template.urdf' - balls = [] - for color in cylinder_colors: - ball_pose = self.get_random_pose(env, ball_size) - ball_id = env.add_object(ball_urdf, ball_pose, color=color) - balls.append(ball_id) - - # Add blocks as obstacles. - # x, y, z dimensions for the asset size - block_size = (0.04, 0.04, 0.04) - block_urdf = 'block/small.urdf' - for _ in range(5): - block_pose = self.get_random_pose(env, block_size) - env.add_object(block_urdf, block_pose) - - # Goal: each ball is on top of the cylinder of the same color. 
- for i in range(len(balls)): - self.add_goal(objs=[balls[i]], matches=np.ones((1, 1)), targ_poses=[cylinder_poses[i]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1/len(balls), - language_goal=self.lang_template.format(color=cylinder_colors[i])) \ No newline at end of file diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/op/upfirdn2d.py b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/op/upfirdn2d.py deleted file mode 100644 index 7bc5a1e331c2bbb1893ac748cfd0f144ff0651b4..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/op/upfirdn2d.py +++ /dev/null @@ -1,184 +0,0 @@ -import os - -import torch -from torch.autograd import Function -from torch.utils.cpp_extension import load - -module_path = os.path.dirname(__file__) -upfirdn2d_op = load( - 'upfirdn2d', - sources=[ - os.path.join(module_path, 'upfirdn2d.cpp'), - os.path.join(module_path, 'upfirdn2d_kernel.cu'), - ], -) - - -class UpFirDn2dBackward(Function): - @staticmethod - def forward( - ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size - ): - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_op.upfirdn2d( - grad_output, - grad_kernel, - down_x, - down_y, - up_x, - up_y, - g_pad_x0, - g_pad_x1, - g_pad_y0, - g_pad_y1, - ) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_op.upfirdn2d( - gradgrad_input, - kernel, - ctx.up_x, - ctx.up_y, - ctx.down_x, - ctx.down_y, - ctx.pad_x0, - ctx.pad_x1, - ctx.pad_y0, - ctx.pad_y1, - ) - # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view( - ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1] - ) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_op.upfirdn2d( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 - ) - # out = 
out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = UpFirDn2d.apply( - input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1]) - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - - return out[:, ::down_y, ::down_x, :] diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/dynamic_rcnn/README.md b/spaces/Gradio-Blocks/uniformer_image_detection/configs/dynamic_rcnn/README.md deleted file mode 100644 index ffdc42dcdfddbaa946f81cba00e73b5573aa19fc..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/dynamic_rcnn/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training - -## Introduction - -[ALGORITHM] - -``` -@article{DynamicRCNN, - author = {Hongkai Zhang and Hong Chang and Bingpeng Ma and Naiyan Wang and Xilin Chen}, - title = {Dynamic {R-CNN}: Towards High Quality Object Detection via Dynamic Training}, - journal = {arXiv preprint arXiv:2004.06002}, - year = {2020} -} -``` - -## Results and Models - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | -|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:| -| R-50 | pytorch | 1x | 3.8 | | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x_20200618_095048.log.json) | diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py deleted file mode 100644 index a790d932152420f5be0a05b21ac122087d315398..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py' -# 
learning policy -lr_config = dict(step=[20, 23]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py deleted file mode 100644 index ef194cb594eb76316324066e23e48184d8cede27..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/exp/upernet_hybrid_small/run.sh b/spaces/Gradio-Blocks/uniformer_image_segmentation/exp/upernet_hybrid_small/run.sh deleted file mode 100644 index 9fb22edfa7a32624ea08a63fe7d720c40db3b696..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/exp/upernet_hybrid_small/run.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -work_path=$(dirname $0) -PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=8 \ - tools/train.py ${work_path}/config.py \ - --launcher pytorch \ - --options model.backbone.pretrained_path='your_model_path/uniformer_small_in1k.pth' \ - --work-dir ${work_path}/ckpt \ - 2>&1 | tee -a ${work_path}/log.txt diff --git a/spaces/GrandaddyShmax/MusicGen_Plus/audiocraft/quantization/base.py b/spaces/GrandaddyShmax/MusicGen_Plus/audiocraft/quantization/base.py deleted file mode 100644 index 1b16c130d266fbd021d3fc29bb9f98c33dd3c588..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus/audiocraft/quantization/base.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Base class for all quantizers. -""" - -from dataclasses import dataclass, field -import typing as tp - -import torch -from torch import nn - - -@dataclass -class QuantizedResult: - x: torch.Tensor - codes: torch.Tensor - bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item. - penalty: tp.Optional[torch.Tensor] = None - metrics: dict = field(default_factory=dict) - - -class BaseQuantizer(nn.Module): - """Base class for quantizers. - """ - - def forward(self, x: torch.Tensor, frame_rate: int) -> QuantizedResult: - """ - Given input tensor x, returns first the quantized (or approximately quantized) - representation along with quantized codes, bandwidth, and any penalty term for the loss. - Finally, this returns a dict of metrics to update logging etc. - Frame rate must be passed so that the bandwidth is properly computed. - """ - raise NotImplementedError() - - def encode(self, x: torch.Tensor) -> torch.Tensor: - """Encode a given input tensor with the specified sample rate at the given bandwidth. - """ - raise NotImplementedError() - - def decode(self, codes: torch.Tensor) -> torch.Tensor: - """Decode the given codes to the quantized representation. - """ - raise NotImplementedError() - - @property - def total_codebooks(self): - """Total number of codebooks. 
- """ - raise NotImplementedError() - - @property - def num_codebooks(self): - """Number of active codebooks. - """ - raise NotImplementedError() - - def set_num_codebooks(self, n: int): - """Set the number of active codebooks. - """ - raise NotImplementedError() - - -class DummyQuantizer(BaseQuantizer): - """Fake quantizer that actually does not perform any quantization. - """ - def __init__(self): - super().__init__() - - def forward(self, x: torch.Tensor, frame_rate: int): - q = x.unsqueeze(1) - return QuantizedResult(x, q, torch.tensor(q.numel() * 32 * frame_rate / 1000 / len(x)).to(x)) - - def encode(self, x: torch.Tensor) -> torch.Tensor: - """Encode a given input tensor with the specified sample rate at the given bandwidth. - In the case of the DummyQuantizer, the codes are actually identical - to the input and resulting quantized representation as no quantization is done. - """ - return x.unsqueeze(1) - - def decode(self, codes: torch.Tensor) -> torch.Tensor: - """Decode the given codes to the quantized representation. - In the case of the DummyQuantizer, the codes are actually identical - to the input and resulting quantized representation as no quantization is done. - """ - return codes.squeeze(1) - - @property - def total_codebooks(self): - """Total number of codebooks. - """ - return 1 - - @property - def num_codebooks(self): - """Total number of codebooks. - """ - return self.total_codebooks - - def set_num_codebooks(self, n: int): - """Set the number of active codebooks. - """ - raise AttributeError("Cannot override the number of codebooks for the dummy quantizer") diff --git a/spaces/Grezz/generate_human_motion/VQ-Trans/models/quantize_cnn.py b/spaces/Grezz/generate_human_motion/VQ-Trans/models/quantize_cnn.py deleted file mode 100644 index b796772749efda9a225bdcb0e7262791a972a710..0000000000000000000000000000000000000000 --- a/spaces/Grezz/generate_human_motion/VQ-Trans/models/quantize_cnn.py +++ /dev/null @@ -1,415 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -class QuantizeEMAReset(nn.Module): - def __init__(self, nb_code, code_dim, args): - super().__init__() - self.nb_code = nb_code - self.code_dim = code_dim - self.mu = args.mu - self.reset_codebook() - - def reset_codebook(self): - self.init = False - self.code_sum = None - self.code_count = None - if torch.cuda.is_available(): - self.register_buffer('codebook', torch.zeros(self.nb_code, self.code_dim).cuda()) - else: - self.register_buffer('codebook', torch.zeros(self.nb_code, self.code_dim)) - - def _tile(self, x): - nb_code_x, code_dim = x.shape - if nb_code_x < self.nb_code: - n_repeats = (self.nb_code + nb_code_x - 1) // nb_code_x - std = 0.01 / np.sqrt(code_dim) - out = x.repeat(n_repeats, 1) - out = out + torch.randn_like(out) * std - else : - out = x - return out - - def init_codebook(self, x): - out = self._tile(x) - self.codebook = out[:self.nb_code] - self.code_sum = self.codebook.clone() - self.code_count = torch.ones(self.nb_code, device=self.codebook.device) - self.init = True - - @torch.no_grad() - def compute_perplexity(self, code_idx) : - # Calculate new centres - code_onehot = torch.zeros(self.nb_code, code_idx.shape[0], device=code_idx.device) # nb_code, N * L - code_onehot.scatter_(0, code_idx.view(1, code_idx.shape[0]), 1) - - code_count = code_onehot.sum(dim=-1) # nb_code - prob = code_count / torch.sum(code_count) - perplexity = torch.exp(-torch.sum(prob * torch.log(prob + 1e-7))) - return perplexity - - @torch.no_grad() - def 
update_codebook(self, x, code_idx): - - code_onehot = torch.zeros(self.nb_code, x.shape[0], device=x.device) # nb_code, N * L - code_onehot.scatter_(0, code_idx.view(1, x.shape[0]), 1) - - code_sum = torch.matmul(code_onehot, x) # nb_code, w - code_count = code_onehot.sum(dim=-1) # nb_code - - out = self._tile(x) - code_rand = out[:self.nb_code] - - # Update centres - self.code_sum = self.mu * self.code_sum + (1. - self.mu) * code_sum # w, nb_code - self.code_count = self.mu * self.code_count + (1. - self.mu) * code_count # nb_code - - usage = (self.code_count.view(self.nb_code, 1) >= 1.0).float() - code_update = self.code_sum.view(self.nb_code, self.code_dim) / self.code_count.view(self.nb_code, 1) - - self.codebook = usage * code_update + (1 - usage) * code_rand - prob = code_count / torch.sum(code_count) - perplexity = torch.exp(-torch.sum(prob * torch.log(prob + 1e-7))) - - - return perplexity - - def preprocess(self, x): - # NCT -> NTC -> [NT, C] - x = x.permute(0, 2, 1).contiguous() - x = x.view(-1, x.shape[-1]) - return x - - def quantize(self, x): - # Calculate latent code x_l - k_w = self.codebook.t() - distance = torch.sum(x ** 2, dim=-1, keepdim=True) - 2 * torch.matmul(x, k_w) + torch.sum(k_w ** 2, dim=0, - keepdim=True) # (N * L, b) - _, code_idx = torch.min(distance, dim=-1) - return code_idx - - def dequantize(self, code_idx): - x = F.embedding(code_idx, self.codebook) - return x - - - def forward(self, x): - N, width, T = x.shape - - # Preprocess - x = self.preprocess(x) - - # Init codebook if not inited - if self.training and not self.init: - self.init_codebook(x) - - # quantize and dequantize through bottleneck - code_idx = self.quantize(x) - x_d = self.dequantize(code_idx) - - # Update embeddings - if self.training: - perplexity = self.update_codebook(x, code_idx) - else : - perplexity = self.compute_perplexity(code_idx) - - # Loss - commit_loss = F.mse_loss(x, x_d.detach()) - - # Passthrough - x_d = x + (x_d - x).detach() - - # Postprocess - x_d = x_d.view(N, T, -1).permute(0, 2, 1).contiguous() #(N, DIM, T) - - return x_d, commit_loss, perplexity - - - -class Quantizer(nn.Module): - def __init__(self, n_e, e_dim, beta): - super(Quantizer, self).__init__() - - self.e_dim = e_dim - self.n_e = n_e - self.beta = beta - - self.embedding = nn.Embedding(self.n_e, self.e_dim) - self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e) - - def forward(self, z): - - N, width, T = z.shape - z = self.preprocess(z) - assert z.shape[-1] == self.e_dim - z_flattened = z.contiguous().view(-1, self.e_dim) - - # B x V - d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \ - torch.sum(self.embedding.weight**2, dim=1) - 2 * \ - torch.matmul(z_flattened, self.embedding.weight.t()) - # B x 1 - min_encoding_indices = torch.argmin(d, dim=1) - z_q = self.embedding(min_encoding_indices).view(z.shape) - - # compute loss for embedding - loss = torch.mean((z_q - z.detach())**2) + self.beta * \ - torch.mean((z_q.detach() - z)**2) - - # preserve gradients - z_q = z + (z_q - z).detach() - z_q = z_q.view(N, T, -1).permute(0, 2, 1).contiguous() #(N, DIM, T) - - min_encodings = F.one_hot(min_encoding_indices, self.n_e).type(z.dtype) - e_mean = torch.mean(min_encodings, dim=0) - perplexity = torch.exp(-torch.sum(e_mean*torch.log(e_mean + 1e-10))) - return z_q, loss, perplexity - - def quantize(self, z): - - assert z.shape[-1] == self.e_dim - - # B x V - d = torch.sum(z ** 2, dim=1, keepdim=True) + \ - torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \ - torch.matmul(z, 
self.embedding.weight.t()) - # B x 1 - min_encoding_indices = torch.argmin(d, dim=1) - return min_encoding_indices - - def dequantize(self, indices): - - index_flattened = indices.view(-1) - z_q = self.embedding(index_flattened) - z_q = z_q.view(indices.shape + (self.e_dim, )).contiguous() - return z_q - - def preprocess(self, x): - # NCT -> NTC -> [NT, C] - x = x.permute(0, 2, 1).contiguous() - x = x.view(-1, x.shape[-1]) - return x - - - -class QuantizeReset(nn.Module): - def __init__(self, nb_code, code_dim, args): - super().__init__() - self.nb_code = nb_code - self.code_dim = code_dim - self.reset_codebook() - self.codebook = nn.Parameter(torch.randn(nb_code, code_dim)) - - def reset_codebook(self): - self.init = False - self.code_count = None - - def _tile(self, x): - nb_code_x, code_dim = x.shape - if nb_code_x < self.nb_code: - n_repeats = (self.nb_code + nb_code_x - 1) // nb_code_x - std = 0.01 / np.sqrt(code_dim) - out = x.repeat(n_repeats, 1) - out = out + torch.randn_like(out) * std - else : - out = x - return out - - def init_codebook(self, x): - out = self._tile(x) - self.codebook = nn.Parameter(out[:self.nb_code]) - self.code_count = torch.ones(self.nb_code, device=self.codebook.device) - self.init = True - - @torch.no_grad() - def compute_perplexity(self, code_idx) : - # Calculate new centres - code_onehot = torch.zeros(self.nb_code, code_idx.shape[0], device=code_idx.device) # nb_code, N * L - code_onehot.scatter_(0, code_idx.view(1, code_idx.shape[0]), 1) - - code_count = code_onehot.sum(dim=-1) # nb_code - prob = code_count / torch.sum(code_count) - perplexity = torch.exp(-torch.sum(prob * torch.log(prob + 1e-7))) - return perplexity - - def update_codebook(self, x, code_idx): - - code_onehot = torch.zeros(self.nb_code, x.shape[0], device=x.device) # nb_code, N * L - code_onehot.scatter_(0, code_idx.view(1, x.shape[0]), 1) - - code_count = code_onehot.sum(dim=-1) # nb_code - - out = self._tile(x) - code_rand = out[:self.nb_code] - - # Update centres - self.code_count = code_count # nb_code - usage = (self.code_count.view(self.nb_code, 1) >= 1.0).float() - - self.codebook.data = usage * self.codebook.data + (1 - usage) * code_rand - prob = code_count / torch.sum(code_count) - perplexity = torch.exp(-torch.sum(prob * torch.log(prob + 1e-7))) - - - return perplexity - - def preprocess(self, x): - # NCT -> NTC -> [NT, C] - x = x.permute(0, 2, 1).contiguous() - x = x.view(-1, x.shape[-1]) - return x - - def quantize(self, x): - # Calculate latent code x_l - k_w = self.codebook.t() - distance = torch.sum(x ** 2, dim=-1, keepdim=True) - 2 * torch.matmul(x, k_w) + torch.sum(k_w ** 2, dim=0, - keepdim=True) # (N * L, b) - _, code_idx = torch.min(distance, dim=-1) - return code_idx - - def dequantize(self, code_idx): - x = F.embedding(code_idx, self.codebook) - return x - - - def forward(self, x): - N, width, T = x.shape - # Preprocess - x = self.preprocess(x) - # Init codebook if not inited - if self.training and not self.init: - self.init_codebook(x) - # quantize and dequantize through bottleneck - code_idx = self.quantize(x) - x_d = self.dequantize(code_idx) - # Update embeddings - if self.training: - perplexity = self.update_codebook(x, code_idx) - else : - perplexity = self.compute_perplexity(code_idx) - - # Loss - commit_loss = F.mse_loss(x, x_d.detach()) - - # Passthrough - x_d = x + (x_d - x).detach() - - # Postprocess - x_d = x_d.view(N, T, -1).permute(0, 2, 1).contiguous() #(N, DIM, T) - - return x_d, commit_loss, perplexity - -class QuantizeEMA(nn.Module): - def 
__init__(self, nb_code, code_dim, args): - super().__init__() - self.nb_code = nb_code - self.code_dim = code_dim - self.mu = 0.99 - self.reset_codebook() - - def reset_codebook(self): - self.init = False - self.code_sum = None - self.code_count = None - self.register_buffer('codebook', torch.zeros(self.nb_code, self.code_dim).cuda()) - - def _tile(self, x): - nb_code_x, code_dim = x.shape - if nb_code_x < self.nb_code: - n_repeats = (self.nb_code + nb_code_x - 1) // nb_code_x - std = 0.01 / np.sqrt(code_dim) - out = x.repeat(n_repeats, 1) - out = out + torch.randn_like(out) * std - else : - out = x - return out - - def init_codebook(self, x): - out = self._tile(x) - self.codebook = out[:self.nb_code] - self.code_sum = self.codebook.clone() - self.code_count = torch.ones(self.nb_code, device=self.codebook.device) - self.init = True - - @torch.no_grad() - def compute_perplexity(self, code_idx) : - # Calculate new centres - code_onehot = torch.zeros(self.nb_code, code_idx.shape[0], device=code_idx.device) # nb_code, N * L - code_onehot.scatter_(0, code_idx.view(1, code_idx.shape[0]), 1) - - code_count = code_onehot.sum(dim=-1) # nb_code - prob = code_count / torch.sum(code_count) - perplexity = torch.exp(-torch.sum(prob * torch.log(prob + 1e-7))) - return perplexity - - @torch.no_grad() - def update_codebook(self, x, code_idx): - - code_onehot = torch.zeros(self.nb_code, x.shape[0], device=x.device) # nb_code, N * L - code_onehot.scatter_(0, code_idx.view(1, x.shape[0]), 1) - - code_sum = torch.matmul(code_onehot, x) # nb_code, w - code_count = code_onehot.sum(dim=-1) # nb_code - - # Update centres - self.code_sum = self.mu * self.code_sum + (1. - self.mu) * code_sum # w, nb_code - self.code_count = self.mu * self.code_count + (1. - self.mu) * code_count # nb_code - - code_update = self.code_sum.view(self.nb_code, self.code_dim) / self.code_count.view(self.nb_code, 1) - - self.codebook = code_update - prob = code_count / torch.sum(code_count) - perplexity = torch.exp(-torch.sum(prob * torch.log(prob + 1e-7))) - - return perplexity - - def preprocess(self, x): - # NCT -> NTC -> [NT, C] - x = x.permute(0, 2, 1).contiguous() - x = x.view(-1, x.shape[-1]) - return x - - def quantize(self, x): - # Calculate latent code x_l - k_w = self.codebook.t() - distance = torch.sum(x ** 2, dim=-1, keepdim=True) - 2 * torch.matmul(x, k_w) + torch.sum(k_w ** 2, dim=0, - keepdim=True) # (N * L, b) - _, code_idx = torch.min(distance, dim=-1) - return code_idx - - def dequantize(self, code_idx): - x = F.embedding(code_idx, self.codebook) - return x - - - def forward(self, x): - N, width, T = x.shape - - # Preprocess - x = self.preprocess(x) - - # Init codebook if not inited - if self.training and not self.init: - self.init_codebook(x) - - # quantize and dequantize through bottleneck - code_idx = self.quantize(x) - x_d = self.dequantize(code_idx) - - # Update embeddings - if self.training: - perplexity = self.update_codebook(x, code_idx) - else : - perplexity = self.compute_perplexity(code_idx) - - # Loss - commit_loss = F.mse_loss(x, x_d.detach()) - - # Passthrough - x_d = x + (x_d - x).detach() - - # Postprocess - x_d = x_d.view(N, T, -1).permute(0, 2, 1).contiguous() #(N, DIM, T) - - return x_d, commit_loss, perplexity \ No newline at end of file diff --git a/spaces/HALLA/HALL-E/index.html b/spaces/HALLA/HALL-E/index.html deleted file mode 100644 index 74d65ba18bf356ce52b1d00b0e7c1903d5e285f2..0000000000000000000000000000000000000000 --- a/spaces/HALLA/HALL-E/index.html +++ /dev/null @@ -1,64 +0,0 @@ - - - - - 
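(Editorial aside before the next file: the `quantize_cnn.py` module removed a few hunks above defines several vector-quantisation bottlenecks — `QuantizeEMAReset`, `Quantizer`, `QuantizeReset` and `QuantizeEMA` — that all consume tensors shaped `(batch, code_dim, frames)` and return the quantised tensor, a commitment loss and the codebook perplexity. A minimal smoke test of the EMA-reset variant is sketched below; the import path and the shape of `args` are assumptions, since only the class bodies survive in this diff.)

```python
# Hypothetical smoke test for the QuantizeEMAReset class shown above.
# `args` only needs the EMA decay `mu`; the module path mirrors the deleted repo layout.
from types import SimpleNamespace

import torch

from models.quantize_cnn import QuantizeEMAReset  # assumed import path

args = SimpleNamespace(mu=0.99)
quantizer = QuantizeEMAReset(nb_code=512, code_dim=32, args=args)

x = torch.randn(4, 32, 16)                   # (batch, code_dim, frames)
x_q, commit_loss, perplexity = quantizer(x)  # codebook is initialised on the first training call
print(x_q.shape, commit_loss.item(), perplexity.item())
```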
- - - diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/convert_tf_to_pytorch.py b/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/convert_tf_to_pytorch.py deleted file mode 100644 index 7ccb787dec188e9dbd9ea31288c049c1bdb30f95..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/models/biggan/pytorch_biggan/pytorch_pretrained_biggan/convert_tf_to_pytorch.py +++ /dev/null @@ -1,312 +0,0 @@ -# coding: utf-8 -""" -Convert a TF Hub model for BigGAN in a PT one. -""" -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from itertools import chain - -import os -import argparse -import logging -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.functional import normalize - -from .model import BigGAN, WEIGHTS_NAME, CONFIG_NAME -from .config import BigGANConfig - -logger = logging.getLogger(__name__) - - -def extract_batch_norm_stats(tf_model_path, batch_norm_stats_path=None): - try: - import numpy as np - import tensorflow as tf - import tensorflow_hub as hub - except ImportError: - raise ImportError("Loading a TensorFlow models in PyTorch, requires TensorFlow and TF Hub to be installed. " - "Please see https://www.tensorflow.org/install/ for installation instructions for TensorFlow. " - "And see https://github.com/tensorflow/hub for installing Hub. " - "Probably pip install tensorflow tensorflow-hub") - tf.reset_default_graph() - logger.info('Loading BigGAN module from: {}'.format(tf_model_path)) - module = hub.Module(tf_model_path) - inputs = {k: tf.placeholder(v.dtype, v.get_shape().as_list(), k) - for k, v in module.get_input_info_dict().items()} - output = module(inputs) - - initializer = tf.global_variables_initializer() - sess = tf.Session() - stacks = sum(((i*10 + 1, i*10 + 3, i*10 + 6, i*10 + 8) for i in range(50)), ()) - numpy_stacks = [] - for i in stacks: - logger.info("Retrieving module_apply_default/stack_{}".format(i)) - try: - stack_var = tf.get_default_graph().get_tensor_by_name("module_apply_default/stack_%d:0" % i) - except KeyError: - break # We have all the stats - numpy_stacks.append(sess.run(stack_var)) - - if batch_norm_stats_path is not None: - torch.save(numpy_stacks, batch_norm_stats_path) - else: - return numpy_stacks - - -def build_tf_to_pytorch_map(model, config): - """ Build a map from TF variables to PyTorch modules. 
""" - tf_to_pt_map = {} - - # Embeddings and GenZ - tf_to_pt_map.update({'linear/w/ema_0.9999': model.embeddings.weight, - 'Generator/GenZ/G_linear/b/ema_0.9999': model.generator.gen_z.bias, - 'Generator/GenZ/G_linear/w/ema_0.9999': model.generator.gen_z.weight_orig, - 'Generator/GenZ/G_linear/u0': model.generator.gen_z.weight_u}) - - # GBlock blocks - model_layer_idx = 0 - for i, (up, in_channels, out_channels) in enumerate(config.layers): - if i == config.attention_layer_position: - model_layer_idx += 1 - layer_str = "Generator/GBlock_%d/" % i if i > 0 else "Generator/GBlock/" - layer_pnt = model.generator.layers[model_layer_idx] - for i in range(4): # Batchnorms - batch_str = layer_str + ("BatchNorm_%d/" % i if i > 0 else "BatchNorm/") - batch_pnt = getattr(layer_pnt, 'bn_%d' % i) - for name in ('offset', 'scale'): - sub_module_str = batch_str + name + "/" - sub_module_pnt = getattr(batch_pnt, name) - tf_to_pt_map.update({sub_module_str + "w/ema_0.9999": sub_module_pnt.weight_orig, - sub_module_str + "u0": sub_module_pnt.weight_u}) - for i in range(4): # Convolutions - conv_str = layer_str + "conv%d/" % i - conv_pnt = getattr(layer_pnt, 'conv_%d' % i) - tf_to_pt_map.update({conv_str + "b/ema_0.9999": conv_pnt.bias, - conv_str + "w/ema_0.9999": conv_pnt.weight_orig, - conv_str + "u0": conv_pnt.weight_u}) - model_layer_idx += 1 - - # Attention block - layer_str = "Generator/attention/" - layer_pnt = model.generator.layers[config.attention_layer_position] - tf_to_pt_map.update({layer_str + "gamma/ema_0.9999": layer_pnt.gamma}) - for pt_name, tf_name in zip(['snconv1x1_g', 'snconv1x1_o_conv', 'snconv1x1_phi', 'snconv1x1_theta'], - ['g/', 'o_conv/', 'phi/', 'theta/']): - sub_module_str = layer_str + tf_name - sub_module_pnt = getattr(layer_pnt, pt_name) - tf_to_pt_map.update({sub_module_str + "w/ema_0.9999": sub_module_pnt.weight_orig, - sub_module_str + "u0": sub_module_pnt.weight_u}) - - # final batch norm and conv to rgb - layer_str = "Generator/BatchNorm/" - layer_pnt = model.generator.bn - tf_to_pt_map.update({layer_str + "offset/ema_0.9999": layer_pnt.bias, - layer_str + "scale/ema_0.9999": layer_pnt.weight}) - layer_str = "Generator/conv_to_rgb/" - layer_pnt = model.generator.conv_to_rgb - tf_to_pt_map.update({layer_str + "b/ema_0.9999": layer_pnt.bias, - layer_str + "w/ema_0.9999": layer_pnt.weight_orig, - layer_str + "u0": layer_pnt.weight_u}) - return tf_to_pt_map - - -def load_tf_weights_in_biggan(model, config, tf_model_path, batch_norm_stats_path=None): - """ Load tf checkpoints and standing statistics in a pytorch model - """ - try: - import numpy as np - import tensorflow as tf - except ImportError: - raise ImportError("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see " - "https://www.tensorflow.org/install/ for installation instructions.") - # Load weights from TF model - checkpoint_path = tf_model_path + "/variables/variables" - init_vars = tf.train.list_variables(checkpoint_path) - from pprint import pprint - pprint(init_vars) - - # Extract batch norm statistics from model if needed - if batch_norm_stats_path: - stats = torch.load(batch_norm_stats_path) - else: - logger.info("Extracting batch norm stats") - stats = extract_batch_norm_stats(tf_model_path) - - # Build TF to PyTorch weights loading map - tf_to_pt_map = build_tf_to_pytorch_map(model, config) - - tf_weights = {} - for name in tf_to_pt_map.keys(): - array = tf.train.load_variable(checkpoint_path, name) - tf_weights[name] = array - # logger.info("Loading TF weight {} with shape {}".format(name, array.shape)) - - # Load parameters - with torch.no_grad(): - pt_params_pnt = set() - for name, pointer in tf_to_pt_map.items(): - array = tf_weights[name] - if pointer.dim() == 1: - if pointer.dim() < array.ndim: - array = np.squeeze(array) - elif pointer.dim() == 2: # Weights - array = np.transpose(array) - elif pointer.dim() == 4: # Convolutions - array = np.transpose(array, (3, 2, 0, 1)) - else: - raise "Wrong dimensions to adjust: " + str((pointer.shape, array.shape)) - if pointer.shape != array.shape: - raise ValueError("Wrong dimensions: " + str((pointer.shape, array.shape))) - logger.info("Initialize PyTorch weight {} with shape {}".format(name, pointer.shape)) - pointer.data = torch.from_numpy(array) if isinstance(array, np.ndarray) else torch.tensor(array) - tf_weights.pop(name, None) - pt_params_pnt.add(pointer.data_ptr()) - - # Prepare SpectralNorm buffers by running one step of Spectral Norm (no need to train the model): - for module in model.modules(): - for n, buffer in module.named_buffers(): - if n == 'weight_v': - weight_mat = module.weight_orig - weight_mat = weight_mat.reshape(weight_mat.size(0), -1) - u = module.weight_u - - v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=config.eps) - buffer.data = v - pt_params_pnt.add(buffer.data_ptr()) - - u = normalize(torch.mv(weight_mat, v), dim=0, eps=config.eps) - module.weight_u.data = u - pt_params_pnt.add(module.weight_u.data_ptr()) - - # Load batch norm statistics - index = 0 - for layer in model.generator.layers: - if not hasattr(layer, 'bn_0'): - continue - for i in range(4): # Batchnorms - bn_pointer = getattr(layer, 'bn_%d' % i) - pointer = bn_pointer.running_means - if pointer.shape != stats[index].shape: - raise "Wrong dimensions: " + str((pointer.shape, stats[index].shape)) - pointer.data = torch.from_numpy(stats[index]) - pt_params_pnt.add(pointer.data_ptr()) - - pointer = bn_pointer.running_vars - if pointer.shape != stats[index+1].shape: - raise "Wrong dimensions: " + str((pointer.shape, stats[index].shape)) - pointer.data = torch.from_numpy(stats[index+1]) - pt_params_pnt.add(pointer.data_ptr()) - - index += 2 - - bn_pointer = model.generator.bn - pointer = bn_pointer.running_means - if pointer.shape != stats[index].shape: - raise "Wrong dimensions: " + str((pointer.shape, stats[index].shape)) - pointer.data = torch.from_numpy(stats[index]) - pt_params_pnt.add(pointer.data_ptr()) - - pointer = bn_pointer.running_vars - if pointer.shape != stats[index+1].shape: - raise "Wrong dimensions: " + str((pointer.shape, stats[index].shape)) - pointer.data = torch.from_numpy(stats[index+1]) - pt_params_pnt.add(pointer.data_ptr()) - - remaining_params = list(n for n, t in chain(model.named_parameters(), 
model.named_buffers()) \ - if t.data_ptr() not in pt_params_pnt) - - logger.info("TF Weights not copied to PyTorch model: {} -".format(', '.join(tf_weights.keys()))) - logger.info("Remanining parameters/buffers from PyTorch model: {} -".format(', '.join(remaining_params))) - - return model - - -BigGAN128 = BigGANConfig(output_dim=128, z_dim=128, class_embed_dim=128, channel_width=128, num_classes=1000, - layers=[(False, 16, 16), - (True, 16, 16), - (False, 16, 16), - (True, 16, 8), - (False, 8, 8), - (True, 8, 4), - (False, 4, 4), - (True, 4, 2), - (False, 2, 2), - (True, 2, 1)], - attention_layer_position=8, eps=1e-4, n_stats=51) - -BigGAN256 = BigGANConfig(output_dim=256, z_dim=128, class_embed_dim=128, channel_width=128, num_classes=1000, - layers=[(False, 16, 16), - (True, 16, 16), - (False, 16, 16), - (True, 16, 8), - (False, 8, 8), - (True, 8, 8), - (False, 8, 8), - (True, 8, 4), - (False, 4, 4), - (True, 4, 2), - (False, 2, 2), - (True, 2, 1)], - attention_layer_position=8, eps=1e-4, n_stats=51) - -BigGAN512 = BigGANConfig(output_dim=512, z_dim=128, class_embed_dim=128, channel_width=128, num_classes=1000, - layers=[(False, 16, 16), - (True, 16, 16), - (False, 16, 16), - (True, 16, 8), - (False, 8, 8), - (True, 8, 8), - (False, 8, 8), - (True, 8, 4), - (False, 4, 4), - (True, 4, 2), - (False, 2, 2), - (True, 2, 1), - (False, 1, 1), - (True, 1, 1)], - attention_layer_position=8, eps=1e-4, n_stats=51) - - -def main(): - parser = argparse.ArgumentParser(description="Convert a BigGAN TF Hub model in a PyTorch model") - parser.add_argument("--model_type", type=str, default="", required=True, - help="BigGAN model type (128, 256, 512)") - parser.add_argument("--tf_model_path", type=str, default="", required=True, - help="Path of the downloaded TF Hub model") - parser.add_argument("--pt_save_path", type=str, default="", - help="Folder to save the PyTorch model (default: Folder of the TF Hub model)") - parser.add_argument("--batch_norm_stats_path", type=str, default="", - help="Path of previously extracted batch norm statistics") - args = parser.parse_args() - - logging.basicConfig(level=logging.INFO) - - if not args.pt_save_path: - args.pt_save_path = args.tf_model_path - - if args.model_type == "128": - config = BigGAN128 - elif args.model_type == "256": - config = BigGAN256 - elif args.model_type == "512": - config = BigGAN512 - else: - raise ValueError("model_type should be one of 128, 256 or 512") - - model = BigGAN(config) - model = load_tf_weights_in_biggan(model, config, args.tf_model_path, args.batch_norm_stats_path) - - model_save_path = os.path.join(args.pt_save_path, WEIGHTS_NAME) - config_save_path = os.path.join(args.pt_save_path, CONFIG_NAME) - - logger.info("Save model dump to {}".format(model_save_path)) - torch.save(model.state_dict(), model_save_path) - logger.info("Save configuration file to {}".format(config_save_path)) - with open(config_save_path, "w", encoding="utf-8") as f: - f.write(config.to_json_string()) - -if __name__ == "__main__": - main() diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py deleted file mode 100644 index 2ea37c16b4a477c48e4dd4500ec03f2d0c86d611..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) 
Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -from fairseq import metrics, utils -from fairseq.criterions import register_criterion - -from .label_smoothed_cross_entropy import ( - LabelSmoothedCrossEntropyCriterion, - LabelSmoothedCrossEntropyCriterionConfig, -) - -from dataclasses import dataclass, field - - -@dataclass -class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig( - LabelSmoothedCrossEntropyCriterionConfig -): - alignment_lambda: float = field( - default=0.05, metadata={"help": "weight for the alignment loss"} - ) - - -@register_criterion( - "label_smoothed_cross_entropy_with_alignment", - dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig, -) -class LabelSmoothedCrossEntropyCriterionWithAlignment( - LabelSmoothedCrossEntropyCriterion -): - def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda): - super().__init__(task, sentence_avg, label_smoothing) - self.alignment_lambda = alignment_lambda - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. - - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - net_output = model(**sample["net_input"]) - loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce) - sample_size = ( - sample["target"].size(0) if self.sentence_avg else sample["ntokens"] - ) - logging_output = { - "loss": utils.item(loss.data) if reduce else loss.data, - "nll_loss": utils.item(nll_loss.data) if reduce else nll_loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["target"].size(0), - "sample_size": sample_size, - } - - alignment_loss = None - - # Compute alignment loss only for training set and non dummy batches. - if "alignments" in sample and sample["alignments"] is not None: - alignment_loss = self.compute_alignment_loss(sample, net_output) - - if alignment_loss is not None: - logging_output["alignment_loss"] = utils.item(alignment_loss.data) - loss += self.alignment_lambda * alignment_loss - - return loss, sample_size, logging_output - - def compute_alignment_loss(self, sample, net_output): - attn_prob = net_output[1]["attn"][0] - bsz, tgt_sz, src_sz = attn_prob.shape - attn = attn_prob.view(bsz * tgt_sz, src_sz) - - align = sample["alignments"] - align_weights = sample["align_weights"].float() - - if len(align) > 0: - # Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to - # the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing. 
- loss = -( - (attn[align[:, 1][:, None], align[:, 0][:, None]]).log() - * align_weights[:, None] - ).sum() - else: - return None - - return loss - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) - nll_loss_sum = utils.item( - sum(log.get("nll_loss", 0) for log in logging_outputs) - ) - alignment_loss_sum = utils.item( - sum(log.get("alignment_loss", 0) for log in logging_outputs) - ) - ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) - sample_size = utils.item( - sum(log.get("sample_size", 0) for log in logging_outputs) - ) - - metrics.log_scalar( - "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 - ) - metrics.log_scalar( - "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3 - ) - metrics.log_scalar( - "alignment_loss", - alignment_loss_sum / sample_size / math.log(2), - sample_size, - round=3, - ) - metrics.log_derived( - "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) - ) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return True diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/__init__.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/modules.py b/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/modules.py deleted file mode 100644 index a192251aaccb036780d77d6c8b538b652a5e24e2..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/modules.py +++ /dev/null @@ -1,276 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -import commons - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-4): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - n_dims = len(x.shape) - mean = torch.mean(x, 1, keepdim=True) - variance = torch.mean((x - mean) ** 2, 1, keepdim=True) - - x = (x - mean) * torch.rsqrt(variance + self.eps) - - shape = [1, -1] + [1] * (n_dims - 2) - x = x * self.gamma.view(*shape) + self.beta.view(*shape) - return x - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - assert hidden_channels % 2 == 0 - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask=None, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - x_in = self.drop(x_in) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - x = (x + res_skip_acts[:, : self.hidden_channels, :]) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ActNorm(nn.Module): - def __init__(self, channels, ddi=False, **kwargs): - super().__init__() - 
self.channels = channels - self.initialized = not ddi - - self.logs = nn.Parameter(torch.zeros(1, channels, 1)) - self.bias = nn.Parameter(torch.zeros(1, channels, 1)) - - def forward(self, x, x_mask=None, reverse=False, **kwargs): - if x_mask is None: - x_mask = torch.ones(x.size(0), 1, x.size(2)).to( - device=x.device, dtype=x.dtype - ) - x_len = torch.sum(x_mask, [1, 2]) - if not self.initialized: - self.initialize(x, x_mask) - self.initialized = True - - if reverse: - z = (x - self.bias) * torch.exp(-self.logs) * x_mask - logdet = None - else: - z = (self.bias + torch.exp(self.logs) * x) * x_mask - logdet = torch.sum(self.logs) * x_len # [b] - - return z, logdet - - def store_inverse(self): - pass - - def set_ddi(self, ddi): - self.initialized = not ddi - - def initialize(self, x, x_mask): - with torch.no_grad(): - denom = torch.sum(x_mask, [0, 2]) - m = torch.sum(x * x_mask, [0, 2]) / denom - m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom - v = m_sq - (m ** 2) - logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6)) - - bias_init = ( - (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype) - ) - logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype) - - self.bias.data.copy_(bias_init) - self.logs.data.copy_(logs_init) - - -class InvConvNear(nn.Module): - def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs): - super().__init__() - assert n_split % 2 == 0 - self.channels = channels - self.n_split = n_split - self.no_jacobian = no_jacobian - - w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0] - if torch.det(w_init) < 0: - w_init[:, 0] = -1 * w_init[:, 0] - self.weight = nn.Parameter(w_init) - - def forward(self, x, x_mask=None, reverse=False, **kwargs): - b, c, t = x.size() - assert c % self.n_split == 0 - if x_mask is None: - x_mask = 1 - x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t - else: - x_len = torch.sum(x_mask, [1, 2]) - - x = x.view(b, 2, c // self.n_split, self.n_split // 2, t) - x = ( - x.permute(0, 1, 3, 2, 4) - .contiguous() - .view(b, self.n_split, c // self.n_split, t) - ) - - if reverse: - if hasattr(self, "weight_inv"): - weight = self.weight_inv - else: - weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype) - logdet = None - else: - weight = self.weight - if self.no_jacobian: - logdet = 0 - else: - logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len # [b] - - weight = weight.view(self.n_split, self.n_split, 1, 1) - z = F.conv2d(x, weight) - - z = z.view(b, 2, self.n_split // 2, c // self.n_split, t) - z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask - return z, logdet - - def store_inverse(self): - self.weight_inv = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype) diff --git a/spaces/Heckeroo/waifu-diffusion/README.md b/spaces/Heckeroo/waifu-diffusion/README.md deleted file mode 100644 index a615352daa14ce97a4c767b8e8541f928b5bc2e5..0000000000000000000000000000000000000000 --- a/spaces/Heckeroo/waifu-diffusion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Waifu Diffusion -emoji: 🐠 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.13.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HenryJJ/llm_template/app.py b/spaces/HenryJJ/llm_template/app.py deleted file mode 100644 index 0c2065c43f496cc224b21dfb251215dc97ae24a5..0000000000000000000000000000000000000000 --- 
a/spaces/HenryJJ/llm_template/app.py +++ /dev/null @@ -1,64 +0,0 @@ -import gradio as gr -import openai -import json -from pymongo import MongoClient -import os - -# Initialize MongoDB client -client = MongoClient(os.environ['DB_URL']) - -db = client['test'] -collection = db['gradio'] - -def get_saved_data(): - saved_data = collection.find({}, {"_id": 0, "name": 1}) - options = [item['name'] for item in saved_data] - return options - -def chat_with_gpt(question, api_key, temperature, system_message): - openai.api_key = api_key - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "system", "content": system_message}, - {"role": "user", "content": question} - ], - temperature=temperature - ) - assistant_reply = response['choices'][0]['message']['content'] - return f"{assistant_reply}" - -def update_dropdown_choices(): - new_choices = get_saved_data() - return gr.Dropdown.update(choices=new_choices) - -def save_to_mongodb(name, question, system_message): - if not name.strip(): # Check if name is empty or just whitespace - return "Please enter a name.", None # Return a message and None to indicate no update for the dropdown - collection.insert_one({"name": name, "question": question, "system_message": system_message}) - return "Saved to MongoDB.", update_dropdown_choices() - -def update_textboxes(selected_name): - selected_data = collection.find_one({"name": selected_name}, {"_id": 0}) - return selected_data['question'], selected_data['system_message'], selected_data['name'] - - -with gr.Blocks() as app: - saved_data_dropdown = gr.Dropdown(get_saved_data(), label="Select Saved Data") - name = gr.Textbox(lines=1, placeholder="Name", label="Name") - question = gr.Textbox(lines=2, placeholder="What's your question?", label="Question") - api_key = gr.Textbox(lines=1, placeholder="Your OpenAI API Key", label="API Key") - temperature = gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="Temperature") - system_message = gr.Textbox(lines=1, placeholder="System Message (Optional)", value="You are a helpful assistant.", label="System Message") - - with gr.Row(): - chat_btn = gr.Button("Chat with GPT") - save_btn = gr.Button("Share to community") - - output = gr.Textbox(label="Result", interactive=False) - - chat_btn.click(chat_with_gpt, inputs=[question, api_key, temperature, system_message], outputs=output) - save_btn.click(save_to_mongodb, inputs=[name, question, system_message], outputs=[output, saved_data_dropdown]) - saved_data_dropdown.select(update_textboxes, inputs=[saved_data_dropdown], outputs=[question, system_message, name]) - -app.launch() diff --git a/spaces/ICML2022/OFA/fairseq/examples/truncated_bptt/transformer_xl_model.py b/spaces/ICML2022/OFA/fairseq/examples/truncated_bptt/transformer_xl_model.py deleted file mode 100644 index a6c8b25a07276c2ee30c0aa5f0e4b0a2837ed5ca..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/truncated_bptt/transformer_xl_model.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import logging -from dataclasses import dataclass, field -from typing import Dict, List, Optional - -import torch -from fairseq.dataclass import FairseqDataclass -from fairseq.models import ( - FairseqIncrementalDecoder, - FairseqLanguageModel, - register_model, -) -from fairseq.modules.checkpoint_activations import checkpoint_wrapper -from omegaconf import II - - -logger = logging.getLogger(__name__) - - -@dataclass -class TransformerXLConfig(FairseqDataclass): - # defaults come from the original Transformer-XL code - cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000]) - d_model: int = 500 - n_head: int = 10 - d_head: int = 50 - d_inner: int = 1000 - div_val: int = 1 - n_layer: int = 12 - mem_len: int = 0 - clamp_len: int = -1 - same_length: bool = False - dropout: float = 0.0 - dropatt: float = 0.0 - checkpoint_activations: bool = False - offload_activations: bool = False - max_target_positions: int = II("task.max_target_positions") - - -@register_model("transformer_xl", dataclass=TransformerXLConfig) -class TransformerXLLanguageModel(FairseqLanguageModel): - @classmethod - def build_model(cls, cfg: TransformerXLConfig, task): - return cls(TransformerXLDecoder(cfg, task)) - - -class TransformerXLDecoder(FairseqIncrementalDecoder): - def __init__(self, cfg, task): - try: - from transformers.models.transfo_xl import ( - TransfoXLConfig, - TransfoXLLMHeadModel, - ) - except ImportError: - from transformers.configuration_transfo_xl import TransfoXLConfig - from transformers.modeling_transfo_xl import TransfoXLLMHeadModel - - super().__init__(task.target_dictionary) - self.cfg = cfg - - # remove any cutoffs larger than the vocab size - cutoffs = [ - cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary) - ] - - config = TransfoXLConfig( - vocab_size=len(task.target_dictionary), - cutoffs=cutoffs, - d_model=cfg.d_model, - d_embed=cfg.d_model, - n_head=cfg.n_head, - d_head=cfg.d_head, - d_inner=cfg.d_inner, - div_val=cfg.div_val, - n_layer=cfg.n_layer, - mem_len=cfg.mem_len, - clamp_len=cfg.clamp_len, - same_length=cfg.same_length, - dropout=cfg.dropout, - dropatt=cfg.dropatt, - ) - logger.info(config) - self.model = TransfoXLLMHeadModel(config) - - # Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax`` - # which adds ``None`` values to an ``nn.ParameterList``, which is not - # supported in PyTorch. Instead we can replace this with an - # ``nn.ModuleList``, which does support ``None`` values. 
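As a standalone aside on the workaround described in the comment above (independent of the huggingface model), `nn.ModuleList` accepts `None` placeholders, which is what the replacement container relies on; the swap itself mirrors the code below:

```python
from torch import nn

# nn.ModuleList tolerates None entries.
projs = nn.ModuleList([nn.Linear(8, 8), None, nn.Linear(8, 8)])
print([type(p).__name__ for p in projs])  # ['Linear', 'NoneType', 'Linear']

# The swap: if every slot of a ParameterList is None, replace the whole
# container with a ModuleList of the same length.
out_projs = nn.ParameterList()
if all(p is None for p in out_projs._parameters.values()):
    out_projs = nn.ModuleList([None] * len(out_projs._parameters))
print(type(out_projs).__name__)  # ModuleList
```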
- try: - if all(p is None for p in self.model.crit.out_projs._parameters.values()): - self.model.crit.out_projs = torch.nn.ModuleList( - [None] * len(self.model.crit.out_projs._parameters) - ) - except Exception: - pass - - if cfg.checkpoint_activations or cfg.offload_activations: - for i in range(len(self.model.transformer.layers)): - self.model.transformer.layers[i] = checkpoint_wrapper( - self.model.transformer.layers[i], - offload_to_cpu=cfg.offload_activations, - ) - # TODO: may save mem to wrap(layer.pos_ff.CoreNet[3]) - - self._mems = None - - def forward( - self, - src_tokens, - src_lengths=None, # unused - incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, - encoder_out=None, - ): - if incremental_state is not None: # used during inference - mems = self.get_incremental_state(incremental_state, "mems") - src_tokens = src_tokens[:, -1:] # only keep the most recent token - else: - mems = self._mems - - output = self.model( - input_ids=src_tokens, - mems=mems, - return_dict=False, - ) - - if len(output) >= 2: - if incremental_state is not None: - self.set_incremental_state(incremental_state, "mems", output[1]) - else: - self._mems = output[1] - - return (output[0],) - - def max_positions(self): - return self.cfg.max_target_positions - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]], - new_order: torch.Tensor, - ): - """Reorder incremental state. - - This will be called when the order of the input has changed from the - previous time step. A typical use case is beam search, where the input - order changes between time steps based on the selection of beams. - """ - mems = self.get_incremental_state(incremental_state, "mems") - if mems is not None: - new_mems = [mems_i.index_select(1, new_order) for mems_i in mems] - self.set_incremental_state(incremental_state, "mems", new_mems) diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/__init__.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/__init__.py deleted file mode 100644 index d7a030e2b5cbca30e6a4ca4f8a17a62a8cf197af..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/__init__.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-"""isort:skip_file""" - -from .adaptive_input import AdaptiveInput -from .adaptive_softmax import AdaptiveSoftmax -from .base_layer import BaseLayer -from .beamable_mm import BeamableMM -from .character_token_embedder import CharacterTokenEmbedder -from .conv_tbc import ConvTBC -from .cross_entropy import cross_entropy -from .downsampled_multihead_attention import DownsampledMultiHeadAttention -from .dynamic_convolution import DynamicConv, DynamicConv1dTBC -from .dynamic_crf_layer import DynamicCRF -from .fairseq_dropout import FairseqDropout -from .fp32_group_norm import Fp32GroupNorm -from .gelu import gelu, gelu_accurate -from .grad_multiply import GradMultiply -from .gumbel_vector_quantizer import GumbelVectorQuantizer -from .kmeans_vector_quantizer import KmeansVectorQuantizer -from .layer_drop import LayerDropModuleList -from .layer_norm import Fp32LayerNorm, LayerNorm -from .learned_positional_embedding import LearnedPositionalEmbedding -from .lightweight_convolution import LightweightConv, LightweightConv1dTBC -from .linearized_convolution import LinearizedConvolution -from .location_attention import LocationAttention -from .lstm_cell_with_zoneout import LSTMCellWithZoneOut -from .multihead_attention import MultiheadAttention -from .positional_embedding import PositionalEmbedding -from .same_pad import SamePad -from .scalar_bias import ScalarBias -from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding -from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer -from .transformer_sentence_encoder import TransformerSentenceEncoder -from .transpose_last import TransposeLast -from .unfold import unfold1d -from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer -from .vggblock import VGGBlock - -__all__ = [ - "AdaptiveInput", - "AdaptiveSoftmax", - "BaseLayer", - "BeamableMM", - "CharacterTokenEmbedder", - "ConvTBC", - "cross_entropy", - "DownsampledMultiHeadAttention", - "DynamicConv1dTBC", - "DynamicConv", - "DynamicCRF", - "FairseqDropout", - "Fp32GroupNorm", - "Fp32LayerNorm", - "gelu", - "gelu_accurate", - "GradMultiply", - "GumbelVectorQuantizer", - "KmeansVectorQuantizer", - "LayerDropModuleList", - "LayerNorm", - "LearnedPositionalEmbedding", - "LightweightConv1dTBC", - "LightweightConv", - "LinearizedConvolution", - "LocationAttention", - "LSTMCellWithZoneOut", - "MultiheadAttention", - "PositionalEmbedding", - "SamePad", - "ScalarBias", - "SinusoidalPositionalEmbedding", - "TransformerSentenceEncoderLayer", - "TransformerSentenceEncoder", - "TransformerDecoderLayer", - "TransformerEncoderLayer", - "TransposeLast", - "VGGBlock", - "unfold1d", -] diff --git a/spaces/IDEA-Research/Grounded-SAM/segment_anything/scripts/export_onnx_model.py b/spaces/IDEA-Research/Grounded-SAM/segment_anything/scripts/export_onnx_model.py deleted file mode 100644 index 8ec5c2ec24fc53cd9fdf66564cfe163b9eb26c24..0000000000000000000000000000000000000000 --- a/spaces/IDEA-Research/Grounded-SAM/segment_anything/scripts/export_onnx_model.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch - -from segment_anything import build_sam, build_sam_vit_b, build_sam_vit_l -from segment_anything.utils.onnx import SamOnnxModel - -import argparse -import warnings - -try: - import onnxruntime # type: ignore - - onnxruntime_exists = True -except ImportError: - onnxruntime_exists = False - -parser = argparse.ArgumentParser( - description="Export the SAM prompt encoder and mask decoder to an ONNX model." -) - -parser.add_argument( - "--checkpoint", type=str, required=True, help="The path to the SAM model checkpoint." -) - -parser.add_argument( - "--output", type=str, required=True, help="The filename to save the ONNX model to." -) - -parser.add_argument( - "--model-type", - type=str, - default="default", - help="In ['default', 'vit_b', 'vit_l']. Which type of SAM model to export.", -) - -parser.add_argument( - "--return-single-mask", - action="store_true", - help=( - "If true, the exported ONNX model will only return the best mask, " - "instead of returning multiple masks. For high resolution images " - "this can improve runtime when upscaling masks is expensive." - ), -) - -parser.add_argument( - "--opset", - type=int, - default=17, - help="The ONNX opset version to use. Must be >=11", -) - -parser.add_argument( - "--quantize-out", - type=str, - default=None, - help=( - "If set, will quantize the model and save it with this name. " - "Quantization is performed with quantize_dynamic from onnxruntime.quantization.quantize." - ), -) - -parser.add_argument( - "--gelu-approximate", - action="store_true", - help=( - "Replace GELU operations with approximations using tanh. Useful " - "for some runtimes that have slow or unimplemented erf ops, used in GELU." - ), -) - -parser.add_argument( - "--use-stability-score", - action="store_true", - help=( - "Replaces the model's predicted mask quality score with the stability " - "score calculated on the low resolution masks using an offset of 1.0. " - ), -) - -parser.add_argument( - "--return-extra-metrics", - action="store_true", - help=( - "The model will return five results: (masks, scores, stability_scores, " - "areas, low_res_logits) instead of the usual three. This can be " - "significantly slower for high resolution outputs." 
- ), -) - - -def run_export( - model_type: str, - checkpoint: str, - output: str, - opset: int, - return_single_mask: bool, - gelu_approximate: bool = False, - use_stability_score: bool = False, - return_extra_metrics=False, -): - print("Loading model...") - if model_type == "vit_b": - sam = build_sam_vit_b(checkpoint) - elif model_type == "vit_l": - sam = build_sam_vit_l(checkpoint) - else: - sam = build_sam(checkpoint) - - onnx_model = SamOnnxModel( - model=sam, - return_single_mask=return_single_mask, - use_stability_score=use_stability_score, - return_extra_metrics=return_extra_metrics, - ) - - if gelu_approximate: - for n, m in onnx_model.named_modules(): - if isinstance(m, torch.nn.GELU): - m.approximate = "tanh" - - dynamic_axes = { - "point_coords": {1: "num_points"}, - "point_labels": {1: "num_points"}, - } - - embed_dim = sam.prompt_encoder.embed_dim - embed_size = sam.prompt_encoder.image_embedding_size - mask_input_size = [4 * x for x in embed_size] - dummy_inputs = { - "image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float), - "point_coords": torch.randint(low=0, high=1024, size=(1, 5, 2), dtype=torch.float), - "point_labels": torch.randint(low=0, high=4, size=(1, 5), dtype=torch.float), - "mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float), - "has_mask_input": torch.tensor([1], dtype=torch.float), - "orig_im_size": torch.tensor([1500, 2250], dtype=torch.float), - } - - _ = onnx_model(**dummy_inputs) - - output_names = ["masks", "iou_predictions", "low_res_masks"] - - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=torch.jit.TracerWarning) - warnings.filterwarnings("ignore", category=UserWarning) - with open(output, "wb") as f: - print(f"Exporing onnx model to {output}...") - torch.onnx.export( - onnx_model, - tuple(dummy_inputs.values()), - f, - export_params=True, - verbose=False, - opset_version=opset, - do_constant_folding=True, - input_names=list(dummy_inputs.keys()), - output_names=output_names, - dynamic_axes=dynamic_axes, - ) - - if onnxruntime_exists: - ort_inputs = {k: to_numpy(v) for k, v in dummy_inputs.items()} - ort_session = onnxruntime.InferenceSession(output) - _ = ort_session.run(None, ort_inputs) - print("Model has successfully been run with ONNXRuntime.") - - -def to_numpy(tensor): - return tensor.cpu().numpy() - - -if __name__ == "__main__": - args = parser.parse_args() - run_export( - model_type=args.model_type, - checkpoint=args.checkpoint, - output=args.output, - opset=args.opset, - return_single_mask=args.return_single_mask, - gelu_approximate=args.gelu_approximate, - use_stability_score=args.use_stability_score, - return_extra_metrics=args.return_extra_metrics, - ) - - if args.quantize_out is not None: - assert onnxruntime_exists, "onnxruntime is required to quantize the model." 
- from onnxruntime.quantization import QuantType # type: ignore - from onnxruntime.quantization.quantize import quantize_dynamic # type: ignore - - print(f"Quantizing model and writing to {args.quantize_out}...") - quantize_dynamic( - model_input=args.output, - model_output=args.quantize_out, - optimize_model=True, - per_channel=False, - reduce_range=False, - weight_type=QuantType.QUInt8, - ) - print("Done!") diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/losses/basic_loss.py b/spaces/Iceclear/StableSR/StableSR/basicsr/losses/basic_loss.py deleted file mode 100644 index d2e965526a9b0e2686575bf93f0173cc2664d9bb..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/losses/basic_loss.py +++ /dev/null @@ -1,253 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F - -from basicsr.archs.vgg_arch import VGGFeatureExtractor -from basicsr.utils.registry import LOSS_REGISTRY -from .loss_util import weighted_loss - -_reduction_modes = ['none', 'mean', 'sum'] - - -@weighted_loss -def l1_loss(pred, target): - return F.l1_loss(pred, target, reduction='none') - - -@weighted_loss -def mse_loss(pred, target): - return F.mse_loss(pred, target, reduction='none') - - -@weighted_loss -def charbonnier_loss(pred, target, eps=1e-12): - return torch.sqrt((pred - target)**2 + eps) - - -@LOSS_REGISTRY.register() -class L1Loss(nn.Module): - """L1 (mean absolute error, MAE) loss. - - Args: - loss_weight (float): Loss weight for L1 loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - super(L1Loss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None. - """ - return self.loss_weight * l1_loss(pred, target, weight, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class MSELoss(nn.Module): - """MSE (L2) loss. - - Args: - loss_weight (float): Loss weight for MSE loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - super(MSELoss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None. - """ - return self.loss_weight * mse_loss(pred, target, weight, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class CharbonnierLoss(nn.Module): - """Charbonnier loss (one variant of Robust L1Loss, a differentiable - variant of L1Loss). 
- - Described in "Deep Laplacian Pyramid Networks for Fast and Accurate - Super-Resolution". - - Args: - loss_weight (float): Loss weight for L1 loss. Default: 1.0. - reduction (str): Specifies the reduction to apply to the output. - Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'. - eps (float): A value used to control the curvature near zero. Default: 1e-12. - """ - - def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12): - super(CharbonnierLoss, self).__init__() - if reduction not in ['none', 'mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: {_reduction_modes}') - - self.loss_weight = loss_weight - self.reduction = reduction - self.eps = eps - - def forward(self, pred, target, weight=None, **kwargs): - """ - Args: - pred (Tensor): of shape (N, C, H, W). Predicted tensor. - target (Tensor): of shape (N, C, H, W). Ground truth tensor. - weight (Tensor, optional): of shape (N, C, H, W). Element-wise weights. Default: None. - """ - return self.loss_weight * charbonnier_loss(pred, target, weight, eps=self.eps, reduction=self.reduction) - - -@LOSS_REGISTRY.register() -class WeightedTVLoss(L1Loss): - """Weighted TV loss. - - Args: - loss_weight (float): Loss weight. Default: 1.0. - """ - - def __init__(self, loss_weight=1.0, reduction='mean'): - if reduction not in ['mean', 'sum']: - raise ValueError(f'Unsupported reduction mode: {reduction}. Supported ones are: mean | sum') - super(WeightedTVLoss, self).__init__(loss_weight=loss_weight, reduction=reduction) - - def forward(self, pred, weight=None): - if weight is None: - y_weight = None - x_weight = None - else: - y_weight = weight[:, :, :-1, :] - x_weight = weight[:, :, :, :-1] - - y_diff = super().forward(pred[:, :, :-1, :], pred[:, :, 1:, :], weight=y_weight) - x_diff = super().forward(pred[:, :, :, :-1], pred[:, :, :, 1:], weight=x_weight) - - loss = x_diff + y_diff - - return loss - - -@LOSS_REGISTRY.register() -class PerceptualLoss(nn.Module): - """Perceptual loss with commonly used style loss. - - Args: - layer_weights (dict): The weight for each layer of vgg feature. - Here is an example: {'conv5_4': 1.}, which means the conv5_4 - feature layer (before relu5_4) will be extracted with weight - 1.0 in calculating losses. - vgg_type (str): The type of vgg network used as feature extractor. - Default: 'vgg19'. - use_input_norm (bool): If True, normalize the input image in vgg. - Default: True. - range_norm (bool): If True, norm images with range [-1, 1] to [0, 1]. - Default: False. - perceptual_weight (float): If `perceptual_weight > 0`, the perceptual - loss will be calculated and the loss will multiplied by the - weight. Default: 1.0. - style_weight (float): If `style_weight > 0`, the style loss will be - calculated and the loss will multiplied by the weight. - Default: 0. - criterion (str): Criterion used for perceptual loss. Default: 'l1'. 
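As a usage aside for the docstring above (hypothetical input tensors; assumes the pretrained VGG weights used by `VGGFeatureExtractor` are available), the loss is typically built with a per-layer weight dict and called on prediction/ground-truth batches:

```python
import torch

# Hypothetical usage sketch of the PerceptualLoss defined in this file.
loss_fn = PerceptualLoss(
    layer_weights={'conv5_4': 1.0},  # weight for the conv5_4 VGG feature map
    vgg_type='vgg19',
    perceptual_weight=1.0,
    style_weight=0.0,
)
pred = torch.rand(2, 3, 128, 128)
gt = torch.rand(2, 3, 128, 128)
percep_loss, style_loss = loss_fn(pred, gt)  # style_loss is None when style_weight == 0
```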
- """ - - def __init__(self, - layer_weights, - vgg_type='vgg19', - use_input_norm=True, - range_norm=False, - perceptual_weight=1.0, - style_weight=0., - criterion='l1'): - super(PerceptualLoss, self).__init__() - self.perceptual_weight = perceptual_weight - self.style_weight = style_weight - self.layer_weights = layer_weights - self.vgg = VGGFeatureExtractor( - layer_name_list=list(layer_weights.keys()), - vgg_type=vgg_type, - use_input_norm=use_input_norm, - range_norm=range_norm) - - self.criterion_type = criterion - if self.criterion_type == 'l1': - self.criterion = torch.nn.L1Loss() - elif self.criterion_type == 'l2': - self.criterion = torch.nn.L2loss() - elif self.criterion_type == 'fro': - self.criterion = None - else: - raise NotImplementedError(f'{criterion} criterion has not been supported.') - - def forward(self, x, gt): - """Forward function. - - Args: - x (Tensor): Input tensor with shape (n, c, h, w). - gt (Tensor): Ground-truth tensor with shape (n, c, h, w). - - Returns: - Tensor: Forward results. - """ - # extract vgg features - x_features = self.vgg(x) - gt_features = self.vgg(gt.detach()) - - # calculate perceptual loss - if self.perceptual_weight > 0: - percep_loss = 0 - for k in x_features.keys(): - if self.criterion_type == 'fro': - percep_loss += torch.norm(x_features[k] - gt_features[k], p='fro') * self.layer_weights[k] - else: - percep_loss += self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k] - percep_loss *= self.perceptual_weight - else: - percep_loss = None - - # calculate style loss - if self.style_weight > 0: - style_loss = 0 - for k in x_features.keys(): - if self.criterion_type == 'fro': - style_loss += torch.norm( - self._gram_mat(x_features[k]) - self._gram_mat(gt_features[k]), p='fro') * self.layer_weights[k] - else: - style_loss += self.criterion(self._gram_mat(x_features[k]), self._gram_mat( - gt_features[k])) * self.layer_weights[k] - style_loss *= self.style_weight - else: - style_loss = None - - return percep_loss, style_loss - - def _gram_mat(self, x): - """Calculate Gram matrix. - - Args: - x (torch.Tensor): Tensor with shape of (n, c, h, w). - - Returns: - torch.Tensor: Gram matrix. 
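For reference, the Gram-matrix computation documented here can be checked in isolation (hypothetical shapes; the normalisation by `c * h * w` matches `_gram_mat` below):

```python
import torch

n, c, h, w = 1, 4, 8, 8
x = torch.rand(n, c, h, w)

feats = x.view(n, c, h * w)                             # flatten the spatial dimensions
gram = feats.bmm(feats.transpose(1, 2)) / (c * h * w)   # (n, c, c) channel correlations
print(gram.shape)  # torch.Size([1, 4, 4])
```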
- """ - n, c, h, w = x.size() - features = x.view(n, c, w * h) - features_t = features.transpose(1, 2) - gram = features.bmm(features_t) / (c * h * w) - return gram diff --git a/spaces/Illumotion/Koboldcpp/examples/benchmark/benchmark-matmult.cpp b/spaces/Illumotion/Koboldcpp/examples/benchmark/benchmark-matmult.cpp deleted file mode 100644 index f1c382aa9b9557a2636b8ca8d6703cc27c03d362..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/examples/benchmark/benchmark-matmult.cpp +++ /dev/null @@ -1,275 +0,0 @@ -#include "build-info.h" -#include "common.h" -#include "ggml.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(_MSC_VER) -#pragma warning(disable: 4244 4267) // possible loss of data -#endif - -static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * graph, int n_threads) { - struct ggml_cplan plan = ggml_graph_plan(graph, n_threads); - - if (plan.work_size > 0) { - buf.resize(plan.work_size); - plan.work_data = buf.data(); - } - - ggml_graph_compute(graph, &plan); -} - -static float tensor_sum_elements(const ggml_tensor * tensor) { - double sum = 0; - if (tensor->type == GGML_TYPE_F32) { - for (int j = 0; j < tensor->ne[1]; j++) { - for (int k = 0; k < tensor->ne[0]; k++) { - sum += ((float *) tensor->data)[j*tensor->ne[0] + k]; - } - } - } - return sum; -} - -static void tensor_dump(const ggml_tensor * tensor, const char * name) { - printf("%15s: type = %i (%5s) ne = %5" PRIi64 " x %5" PRIi64 " x %5" PRIi64 ", nb = (%5zi, %5zi, %5zi) - ", name, - tensor->type, ggml_type_name(tensor->type), - tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->nb[0], tensor->nb[1], tensor->nb[2]); - float sum = tensor_sum_elements(tensor); - printf("Sum of tensor %s is %6.2f\n", name, sum); -} - -#define TENSOR_DUMP(tensor) tensor_dump(tensor, #tensor) - -struct benchmark_params_struct { - int32_t n_threads = 1; - int32_t n_iterations = 10; -}; - -static void print_usage(int /*argc*/, char ** argv, struct benchmark_params_struct params) { - fprintf(stderr, "usage: %s [options]\n", argv[0]); - fprintf(stderr, "\n"); - fprintf(stderr, "options:\n"); - fprintf(stderr, " -h, --help show this help message and exit\n"); - fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); - fprintf(stderr, " -i N, --iter N number of iterations to use during computation (default: %d)\n", params.n_iterations); - fprintf(stderr, "\n"); -} - -int main(int argc, char ** argv) { - struct benchmark_params_struct benchmark_params; - - bool invalid_param = false; - std::string arg; - for (int i = 1; i < argc; i++) { - arg = argv[i]; - - if (arg == "-t" || arg == "--threads") { - if (++i >= argc) { - invalid_param = true; - break; - } - benchmark_params.n_threads = std::stoi(argv[i]); - } else if (arg == "-i" || arg == "--iter") { - if (++i >= argc) { - invalid_param = true; - break; - } - benchmark_params.n_iterations = std::stoi(argv[i]); - } else if (arg == "-h" || arg == "--help") { - print_usage(argc, argv, benchmark_params); - exit(0); - } - } - if (invalid_param) { - fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str()); - print_usage(argc, argv, benchmark_params); - exit(1); - } - - print_build_info(); - printf("Starting Test\n"); - - // create the ggml context - struct ggml_context * ctx; - //const int sizex = 4096; - //const int sizey = 11008; - -#undef VERBOSE_DEBUGGING -#ifndef 
VERBOSE_DEBUGGING - const int sizey = 4096; - const int sizex = 11008; - const int sizez = 128; -#else - /* Working - let's increase size */ - const int sizey = 1; - const int sizex = (8*32); - const int sizez = 1; - - /*const int sizey = 1; - const int sizex = 3*(8*32); - const int sizez = 1;*/ -#endif - - //printf("Memsize required = %i\n", sizex*sizex); - - // TODO: perform the bench for all types or for a user specified type - const ggml_type qtype = GGML_TYPE_Q4_1; - - size_t ctx_size = 0; - ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); - ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); - ctx_size += sizex*sizez*ggml_type_sizef(GGML_TYPE_F32); - ctx_size += sizex*sizey*ggml_type_sizef(qtype); - ctx_size += sizex*sizey*ggml_type_sizef(qtype); - ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS - ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS - ctx_size += 1024*1024*16; - - printf("Allocating Memory of size %zi bytes, %zi MB\n",ctx_size, (ctx_size/1024/1024)); - - struct ggml_init_params params = { - /*.mem_size =*/ ctx_size, - /*.mem_buffer =*/ NULL, - /* no_alloc =*/ 0 - }; - - ctx = ggml_init(params); - if (!ctx) { - fprintf(stderr, "%s: ggml_init() failed\n", __func__); - return 1; - } - - - printf("Creating new tensors\n"); - // printf("Creating new tensor m1\n"); - struct ggml_tensor * m11 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizey); - ggml_set_f32(m11, 1.0f); - - // printf("Creating new tensor m1\n"); - struct ggml_tensor * m12 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizey); - ggml_set_f32(m12, 1.5f); - - // printf("Creating new tensor m2\n"); - struct ggml_tensor * m2 = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, sizex, sizez); - ggml_set_f32(m2, 2.0f); - - printf("\n------ Test 1 - Matrix Mult via F32 code\n"); - // printf("Creating new tensor m11xm2\n"); - struct ggml_tensor * m11xm2 = ggml_mul_mat(ctx, m11, m2); - - // printf("Creating compute graph\n"); - struct ggml_cgraph gf = ggml_build_forward(m11xm2); - - printf("n_threads=%i\n", benchmark_params.n_threads); - - TENSOR_DUMP(m11); - TENSOR_DUMP(m2); - - std::vector work_buffer; - - ggml_graph_compute_helper(work_buffer, &gf, benchmark_params.n_threads); - - TENSOR_DUMP(gf.nodes[0]); - - printf("\n------ Test 2 - Matrix Mult via %s code\n", ggml_type_name(qtype)); - - int32_t nelements = sizex*sizey; - - std::vector hist_cur(1 << 4, 0); - - // Set up a the benchmark matrices - // printf("Creating new tensor q11 & Running quantize\n"); - struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey); - ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements, hist_cur.data()); - - // Set up a the compute graph - // printf("Creating new tensor q31\n"); - struct ggml_tensor * q31 = ggml_mul_mat(ctx, q11, m2); - - // printf("Creating compute graph\n"); - struct ggml_cgraph gf31 = ggml_build_forward(q31); - - // Set up a second graph computation to make sure we override the CPU cache lines - // printf("Creating new tensor q12 & Running quantize\n"); - struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey); - ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements, hist_cur.data()); - - // printf("Creating new tensor q32\n"); - struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2); - - //printf("Creating compute graph\n"); - struct ggml_cgraph gf32 = ggml_build_forward(q32); - printf("n_threads=%i\n", benchmark_params.n_threads); - - const int dimx = sizex; - const int dimy = sizey; - const int dimz 
= sizez; - long long int flops_per_dot_product = dimy + dimy; - long long int flops_per_matrix = flops_per_dot_product * dimx * dimz; ; - printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000); - - - // Let's use the F32 result from above as a reference for the quantized multiplication - float sum_of_F32_reference = tensor_sum_elements(gf.nodes[0]); - - printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; gigaFLOPS\n"); - printf("=====================================================================================\n"); - - double gflops_sum = 0; - for (int i=0;i allowed_delta) { - printf("\nABORT - ERROR in Matrix Multiplication result - expected %6.2f, got %6.2f (delta %6.2f > allowed_delta %6.2f)\n", - sum_of_F32_reference, - sum_of_Q4_result, - delta, - allowed_delta - ); - exit(0); - } - - // Running a different graph computation to make sure we override the CPU cache lines - ggml_graph_compute_helper(work_buffer, &gf32, benchmark_params.n_threads); - } - printf("\n"); - printf("Average%78.2f\n",gflops_sum/((double)benchmark_params.n_iterations)); - printf("=====================================================================================\n"); -} diff --git a/spaces/Intel/ldm3d/static/public/js/WebVR.js b/spaces/Intel/ldm3d/static/public/js/WebVR.js deleted file mode 100644 index 20808284bc2e2c5e57ad213bda90626c481f3d2c..0000000000000000000000000000000000000000 --- a/spaces/Intel/ldm3d/static/public/js/WebVR.js +++ /dev/null @@ -1,261 +0,0 @@ -/** - * @author mrdoob / http://mrdoob.com - * @author Mugen87 / https://github.com/Mugen87 - * - * Based on @tojiro's vr-samples-utils.js - */ - -THREE.WEBVR = { - - createButton: function ( renderer, options ) { - - if ( options && options.referenceSpaceType ) { - - renderer.vr.setReferenceSpaceType( options.referenceSpaceType ); - - } - - function showEnterVR( device ) { - - button.style.display = ''; - - button.style.cursor = 'pointer'; - button.style.left = 'calc(50% - 50px)'; - button.style.width = '100px'; - - button.textContent = 'ENTER VR'; - - button.onmouseenter = function () { - - button.style.opacity = '1.0'; - - }; - - button.onmouseleave = function () { - - button.style.opacity = '0.5'; - - }; - - button.onclick = function () { - - device.isPresenting ? 
device.exitPresent() : device.requestPresent( [ { source: renderer.domElement } ] ); - - }; - - renderer.vr.setDevice( device ); - - } - - function showEnterXR( /*device*/ ) { - - var currentSession = null; - - function onSessionStarted( session ) { - - session.addEventListener( 'end', onSessionEnded ); - - renderer.vr.setSession( session ); - button.textContent = 'EXIT XR'; - - currentSession = session; - - } - - function onSessionEnded( /*event*/ ) { - - currentSession.removeEventListener( 'end', onSessionEnded ); - - renderer.vr.setSession( null ); - button.textContent = 'ENTER XR'; - - currentSession = null; - - } - - // - - button.style.display = ''; - - button.style.cursor = 'pointer'; - button.style.left = 'calc(50% - 50px)'; - button.style.width = '100px'; - - button.textContent = 'ENTER XR'; - - button.onmouseenter = function () { - - button.style.opacity = '1.0'; - - }; - - button.onmouseleave = function () { - - button.style.opacity = '0.5'; - - }; - - button.onclick = function () { - - if ( currentSession === null ) { - - // WebXR's requestReferenceSpace only works if the corresponding feature - // was requested at session creation time. For simplicity, just ask for - // the interesting ones as optional features, but be aware that the - // requestReferenceSpace call will fail if it turns out to be unavailable. - // ('local' is always available for immersive sessions and doesn't need to - // be requested separately.) - - var sessionInit = { optionalFeatures: [ 'local-floor', 'bounded-floor' ] }; - navigator.xr.requestSession( 'immersive-vr', sessionInit ).then( onSessionStarted ); - - } else { - - currentSession.end(); - - } - - }; - - } - - function disableButton() { - - button.style.display = ''; - - button.style.cursor = 'auto'; - button.style.left = 'calc(50% - 75px)'; - button.style.width = '150px'; - - button.onmouseenter = null; - button.onmouseleave = null; - - button.onclick = null; - - } - - function showVRNotFound() { - - disableButton(); - - button.textContent = 'VR NOT FOUND'; - - renderer.vr.setDevice( null ); - - } - - function showXRNotFound() { - - disableButton(); - - button.textContent = 'XR NOT FOUND'; - - } - - function stylizeElement( element ) { - - element.style.position = 'absolute'; - element.style.bottom = '20px'; - element.style.padding = '12px 6px'; - element.style.border = '1px solid #fff'; - element.style.borderRadius = '4px'; - element.style.background = 'rgba(0,0,0,0.1)'; - element.style.color = '#fff'; - element.style.font = 'normal 13px sans-serif'; - element.style.textAlign = 'center'; - element.style.opacity = '0.5'; - element.style.outline = 'none'; - element.style.zIndex = '999'; - - } - - if ( 'xr' in navigator ) { - - var button = document.createElement( 'button' ); - button.style.display = 'none'; - - stylizeElement( button ); - - navigator.xr.isSessionSupported( 'immersive-vr' ).then( function ( supported ) { - - if ( supported ) { - - showEnterXR(); - - } else { - - showXRNotFound(); - - } - - } ); - - return button; - - } else if ( 'getVRDisplays' in navigator ) { - - var button = document.createElement( 'button' ); - button.style.display = 'none'; - - stylizeElement( button ); - - window.addEventListener( 'vrdisplayconnect', function ( event ) { - - showEnterVR( event.display ); - - }, false ); - - window.addEventListener( 'vrdisplaydisconnect', function ( /*event*/ ) { - - showVRNotFound(); - - }, false ); - - window.addEventListener( 'vrdisplaypresentchange', function ( event ) { - - button.textContent = event.display.isPresenting 
? 'EXIT VR' : 'ENTER VR'; - - }, false ); - - window.addEventListener( 'vrdisplayactivate', function ( event ) { - - event.display.requestPresent( [ { source: renderer.domElement } ] ); - - }, false ); - - navigator.getVRDisplays() - .then( function ( displays ) { - - if ( displays.length > 0 ) { - - showEnterVR( displays[ 0 ] ); - - } else { - - showVRNotFound(); - - } - - } ).catch( showVRNotFound ); - - return button; - - } else { - - var message = document.createElement( 'a' ); - message.href = 'https://webvr.info'; - message.innerHTML = 'WEBVR NOT SUPPORTED'; - - message.style.left = 'calc(50% - 90px)'; - message.style.width = '180px'; - message.style.textDecoration = 'none'; - - stylizeElement( message ); - - return message; - - } - - } - -}; \ No newline at end of file diff --git a/spaces/JoPmt/Short_Bedtime_Stories/README.md b/spaces/JoPmt/Short_Bedtime_Stories/README.md deleted file mode 100644 index b3c971f703624f6b4a97bee9321d5b9a8d4c0e80..0000000000000000000000000000000000000000 --- a/spaces/JoPmt/Short_Bedtime_Stories/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Short Bedtime Stories -emoji: 🐠 -colorFrom: gray -colorTo: red -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KaygNas/cut-it/Dockerfile b/spaces/KaygNas/cut-it/Dockerfile deleted file mode 100644 index 91448797d01519360d73ee81db6be6c4587aff1e..0000000000000000000000000000000000000000 --- a/spaces/KaygNas/cut-it/Dockerfile +++ /dev/null @@ -1,43 +0,0 @@ -# Use the official Node 18 image -FROM node:18 - -# Set the working directory to /html -WORKDIR /code - -# Copy and install dependencies -COPY package*.json ./ -RUN npm install - -# Build the web app -COPY . . -RUN npm run build - - -# Use the official Python 3.9 image -FROM python:3.9 - -# Set the working directory to /code -WORKDIR /code - -# Copy the current directory contents into the container at /code -COPY ./requirements.txt /code/requirements.txt - -# Install requirements.txt -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user -# Switch to the "user" user -USER user -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user main.py $HOME/app -COPY --chown=user --from=0 /code/dist $HOME/app/dist - -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/Kevin676/AutoGPT/autogpt/commands/web_playwright.py b/spaces/Kevin676/AutoGPT/autogpt/commands/web_playwright.py deleted file mode 100644 index 4e388ded203cefb5e24f9116f7fe5b8a94893413..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/commands/web_playwright.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Web scraping commands using Playwright""" -from __future__ import annotations - -try: - from playwright.sync_api import sync_playwright -except ImportError: - print( - "Playwright not installed. Please install it with 'pip install playwright' to use." 
- ) -from bs4 import BeautifulSoup - -from autogpt.processing.html import extract_hyperlinks, format_hyperlinks - - -def scrape_text(url: str) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - - except Exception as e: - text = f"Error: {str(e)}" - - finally: - browser.close() - - return text - - -def scrape_links(url: str) -> str | list[str]: - """Scrape links from a webpage - - Args: - url (str): The URL to scrape links from - - Returns: - Union[str, List[str]]: The scraped links - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - hyperlinks = extract_hyperlinks(soup, url) - formatted_links = format_hyperlinks(hyperlinks) - - except Exception as e: - formatted_links = f"Error: {str(e)}" - - finally: - browser.close() - - return formatted_links diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/speaker.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/speaker.py deleted file mode 100644 index 494e882fe34fc38dcc793ab8c74a6cc2376bb7b5..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/encoder/data_objects/speaker.py +++ /dev/null @@ -1,40 +0,0 @@ -from encoder.data_objects.random_cycler import RandomCycler -from encoder.data_objects.utterance import Utterance -from pathlib import Path - -# Contains the set of utterances of a single speaker -class Speaker: - def __init__(self, root: Path): - self.root = root - self.name = root.name - self.utterances = None - self.utterance_cycler = None - - def _load_utterances(self): - with self.root.joinpath("_sources.txt").open("r") as sources_file: - sources = [l.split(",") for l in sources_file] - sources = {frames_fname: wave_fpath for frames_fname, wave_fpath in sources} - self.utterances = [Utterance(self.root.joinpath(f), w) for f, w in sources.items()] - self.utterance_cycler = RandomCycler(self.utterances) - - def random_partial(self, count, n_frames): - """ - Samples a batch of unique partial utterances from the disk in a way that all - utterances come up at least once every two cycles and in a random order every time. - - :param count: The number of partial utterances to sample from the set of utterances from - that speaker. Utterances are guaranteed not to be repeated if is not larger than - the number of utterances available. - :param n_frames: The number of frames in the partial utterance. - :return: A list of tuples (utterance, frames, range) where utterance is an Utterance, - frames are the frames of the partial utterances and range is the range of the partial - utterance with regard to the complete utterance. 
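As an illustrative usage sketch of the sampler documented above (hypothetical dataset path; the directory is expected to contain the `_sources.txt` index referenced in `_load_utterances`):

```python
from pathlib import Path

speaker = Speaker(Path("encoder_preprocessed/speaker_0001"))  # hypothetical speaker directory
partials = speaker.random_partial(count=4, n_frames=160)
for utterance, frames, frame_range in partials:
    print(frames.shape, frame_range)
```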
- """ - if self.utterances is None: - self._load_utterances() - - utterances = self.utterance_cycler.sample(count) - - a = [(u,) + u.random_partial(n_frames) for u in utterances] - - return a diff --git a/spaces/KevlarVK/content_summarizer/Utils.py b/spaces/KevlarVK/content_summarizer/Utils.py deleted file mode 100644 index 35d221759fa9d892bc3c7362e20411101f89e521..0000000000000000000000000000000000000000 --- a/spaces/KevlarVK/content_summarizer/Utils.py +++ /dev/null @@ -1,103 +0,0 @@ -import requests -from bs4 import BeautifulSoup -from nltk.tokenize import sent_tokenize -import nltk -import re -import streamlit as st -from youtube_transcript_api import YouTubeTranscriptApi -import spacy - -@st.cache -def fetch_article_text(url: str): - - r = requests.get(url) - soup = BeautifulSoup(r.text, "html.parser") - results = soup.find_all(["h1", "p"]) - text = [result.text for result in results] - ARTICLE = " ".join(text) - return re.sub(r'\[\d+\]', '', ARTICLE) - -def count_tokens(text: str): - return len(text.split(" ")) - -@st.cache -def get_text_from_youtube_url(url: str): - - id = url.split("=")[1] - try: - transcript = YouTubeTranscriptApi.get_transcript(id) - except: - transcript = YouTubeTranscriptApi.find_transcript(["en"]) - script = "" - - for text in transcript: - t = text["text"] - if t != '[Music]': - script += t.lower() + " " - - return add_punctuation(script) - -def add_punctuation(text: str): - - # try: - nlp = spacy.load("en_core_web_sm") - # except: - # import spacy.cli - # spacy.cli.download("en_core_web_sm") - # nlp = spacy.load("en_core_web_sm") - - doc = nlp(text) - punctuation = [".", ",", ";", ":", "?", "!"] - - sentences = [] - for sentence in doc.sents: - - last_token = sentence[-1] - if last_token.text in punctuation: - sentence = sentence[:-1] - - last_word = sentence[-1] - if last_word.pos_ == "NOUN": - sentence = sentence.text + "." - elif last_word.pos_ == "VERB": - sentence = sentence.text + "?" - else: - sentence = sentence.text + "." 
- - sentence = sentence[0].upper() + sentence[1:] - sentences.append(sentence) - - text_with_punctuation = " ".join(sentences) - - return text_with_punctuation - - -def get_input_chunks(text: str, max_length: int = 500): - - text = re.sub(r'\[\d+\]', '', text) - - try: - sentences = sent_tokenize(text) - except: - nltk.download('punkt') - sentences = sent_tokenize(text) - - sentences = [sentence for sentence in sentences if len(sentence.strip()) > 0 and count_tokens(sentence) > 4] - - input_chunks = [] - temp_sentences = "" - tokens = 0 - - for sentence in sentences: - if tokens + count_tokens(sentence) < max_length: - temp_sentences += sentence - tokens += count_tokens(sentence) - else: - input_chunks.append(temp_sentences) - tokens = count_tokens(sentence) - temp_sentences = sentence - - if len(temp_sentences) > 0: - input_chunks.append(temp_sentences) - - return input_chunks diff --git a/spaces/Kimata/Sanskrit-TTS/transforms.py b/spaces/Kimata/Sanskrit-TTS/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - 
unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = 
(inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/fcos_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/fcos_head.py deleted file mode 100644 index f3206877a1e3684e7ecf90799bb234c59838f294..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/fcos_head.py +++ /dev/null @@ -1,455 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Dict, List, Tuple - -import torch -import torch.nn as nn -from mmcv.cnn import Scale -from mmengine.structures import InstanceData -from torch import Tensor - -from mmdet.registry import MODELS -from mmdet.utils import (ConfigType, InstanceList, MultiConfig, - OptInstanceList, RangeType, reduce_mean) -from ..utils import multi_apply -from .anchor_free_head import AnchorFreeHead - -INF = 1e8 - - -@MODELS.register_module() -class FCOSHead(AnchorFreeHead): - """Anchor-free head used in `FCOS `_. - - The FCOS head does not use anchor boxes. Instead bounding boxes are - predicted at each pixel and a centerness measure is used to suppress - low-quality predictions. - Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training - tricks used in official repo, which will bring remarkable mAP gains - of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for - more detail. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - strides (Sequence[int] or Sequence[Tuple[int, int]]): Strides of points - in multiple feature levels. Defaults to (4, 8, 16, 32, 64). - regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple - level points. - center_sampling (bool): If true, use center sampling. - Defaults to False. - center_sample_radius (float): Radius of center sampling. - Defaults to 1.5. - norm_on_bbox (bool): If true, normalize the regression targets with - FPN strides. Defaults to False. - centerness_on_reg (bool): If true, position centerness on the - regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. - Defaults to False. - conv_bias (bool or str): If specified as `auto`, it will be decided by - the norm_cfg. Bias of conv will be set as True if `norm_cfg` is - None, otherwise False. Defaults to "auto". - loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. - loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss. - loss_centerness (:obj:`ConfigDict`, or dict): Config of centerness - loss. - norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and - config norm layer. Defaults to - ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``. - init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ - dict]): Initialization config dict. 
- - Example: - >>> self = FCOSHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_score, bbox_pred, centerness = self.forward(feats) - >>> assert len(cls_score) == len(self.scales) - """ # noqa: E501 - - def __init__(self, - num_classes: int, - in_channels: int, - regress_ranges: RangeType = ((-1, 64), (64, 128), (128, 256), - (256, 512), (512, INF)), - center_sampling: bool = False, - center_sample_radius: float = 1.5, - norm_on_bbox: bool = False, - centerness_on_reg: bool = False, - loss_cls: ConfigType = dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox: ConfigType = dict(type='IoULoss', loss_weight=1.0), - loss_centerness: ConfigType = dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - norm_cfg: ConfigType = dict( - type='GN', num_groups=32, requires_grad=True), - init_cfg: MultiConfig = dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='conv_cls', - std=0.01, - bias_prob=0.01)), - **kwargs) -> None: - self.regress_ranges = regress_ranges - self.center_sampling = center_sampling - self.center_sample_radius = center_sample_radius - self.norm_on_bbox = norm_on_bbox - self.centerness_on_reg = centerness_on_reg - super().__init__( - num_classes=num_classes, - in_channels=in_channels, - loss_cls=loss_cls, - loss_bbox=loss_bbox, - norm_cfg=norm_cfg, - init_cfg=init_cfg, - **kwargs) - self.loss_centerness = MODELS.build(loss_centerness) - - def _init_layers(self) -> None: - """Initialize layers of the head.""" - super()._init_layers() - self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) - self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - def forward( - self, x: Tuple[Tensor] - ) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]: - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: A tuple of each level outputs. - - - cls_scores (list[Tensor]): Box scores for each scale level, \ - each is a 4D-tensor, the channel number is \ - num_points * num_classes. - - bbox_preds (list[Tensor]): Box energies / deltas for each \ - scale level, each is a 4D-tensor, the channel number is \ - num_points * 4. - - centernesses (list[Tensor]): centerness for each scale level, \ - each is a 4D-tensor, the channel number is num_points * 1. - """ - return multi_apply(self.forward_single, x, self.scales, self.strides) - - def forward_single(self, x: Tensor, scale: Scale, - stride: int) -> Tuple[Tensor, Tensor, Tensor]: - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - stride (int): The corresponding stride for feature maps, only - used to normalize the bbox prediction when self.norm_on_bbox - is True. - - Returns: - tuple: scores for each class, bbox predictions and centerness - predictions of input feature maps. 
- """ - cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x) - if self.centerness_on_reg: - centerness = self.conv_centerness(reg_feat) - else: - centerness = self.conv_centerness(cls_feat) - # scale the bbox_pred of different level - # float to avoid overflow when enabling FP16 - bbox_pred = scale(bbox_pred).float() - if self.norm_on_bbox: - # bbox_pred needed for gradient computation has been modified - # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace - # F.relu(bbox_pred) with bbox_pred.clamp(min=0) - bbox_pred = bbox_pred.clamp(min=0) - if not self.training: - bbox_pred *= stride - else: - bbox_pred = bbox_pred.exp() - return cls_score, bbox_pred, centerness - - def loss_by_feat( - self, - cls_scores: List[Tensor], - bbox_preds: List[Tensor], - centernesses: List[Tensor], - batch_gt_instances: InstanceList, - batch_img_metas: List[dict], - batch_gt_instances_ignore: OptInstanceList = None - ) -> Dict[str, Tensor]: - """Calculate the loss based on the features extracted by the detection - head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - centernesses (list[Tensor]): centerness for each scale level, each - is a 4D-tensor, the channel number is num_points * 1. - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - batch_img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): - Batch of gt_instances_ignore. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - assert len(cls_scores) == len(bbox_preds) == len(centernesses) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - all_level_points = self.prior_generator.grid_priors( - featmap_sizes, - dtype=bbox_preds[0].dtype, - device=bbox_preds[0].device) - labels, bbox_targets = self.get_targets(all_level_points, - batch_gt_instances) - - num_imgs = cls_scores[0].size(0) - # flatten cls_scores, bbox_preds and centerness - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - for bbox_pred in bbox_preds - ] - flatten_centerness = [ - centerness.permute(0, 2, 3, 1).reshape(-1) - for centerness in centernesses - ] - flatten_cls_scores = torch.cat(flatten_cls_scores) - flatten_bbox_preds = torch.cat(flatten_bbox_preds) - flatten_centerness = torch.cat(flatten_centerness) - flatten_labels = torch.cat(labels) - flatten_bbox_targets = torch.cat(bbox_targets) - # repeat points to align with bbox_preds - flatten_points = torch.cat( - [points.repeat(num_imgs, 1) for points in all_level_points]) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((flatten_labels >= 0) - & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) - num_pos = torch.tensor( - len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) - num_pos = max(reduce_mean(num_pos), 1.0) - loss_cls = self.loss_cls( - flatten_cls_scores, flatten_labels, avg_factor=num_pos) - - pos_bbox_preds = flatten_bbox_preds[pos_inds] - pos_centerness = flatten_centerness[pos_inds] - pos_bbox_targets = flatten_bbox_targets[pos_inds] - pos_centerness_targets = self.centerness_target(pos_bbox_targets) - # centerness weighted iou loss - centerness_denorm = max( - reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) - - if len(pos_inds) > 0: - pos_points = flatten_points[pos_inds] - pos_decoded_bbox_preds = self.bbox_coder.decode( - pos_points, pos_bbox_preds) - pos_decoded_target_preds = self.bbox_coder.decode( - pos_points, pos_bbox_targets) - loss_bbox = self.loss_bbox( - pos_decoded_bbox_preds, - pos_decoded_target_preds, - weight=pos_centerness_targets, - avg_factor=centerness_denorm) - loss_centerness = self.loss_centerness( - pos_centerness, pos_centerness_targets, avg_factor=num_pos) - else: - loss_bbox = pos_bbox_preds.sum() - loss_centerness = pos_centerness.sum() - - return dict( - loss_cls=loss_cls, - loss_bbox=loss_bbox, - loss_centerness=loss_centerness) - - def get_targets( - self, points: List[Tensor], batch_gt_instances: InstanceList - ) -> Tuple[List[Tensor], List[Tensor]]: - """Compute regression, classification and centerness targets for points - in multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - - Returns: - tuple: Targets of each level. - - - concat_lvl_labels (list[Tensor]): Labels of each level. - - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ - level. 
- """ - assert len(points) == len(self.regress_ranges) - num_levels = len(points) - # expand regress ranges to align with points - expanded_regress_ranges = [ - points[i].new_tensor(self.regress_ranges[i])[None].expand_as( - points[i]) for i in range(num_levels) - ] - # concat all levels points and regress ranges - concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) - concat_points = torch.cat(points, dim=0) - - # the number of points per img, per lvl - num_points = [center.size(0) for center in points] - - # get labels and bbox_targets of each image - labels_list, bbox_targets_list = multi_apply( - self._get_targets_single, - batch_gt_instances, - points=concat_points, - regress_ranges=concat_regress_ranges, - num_points_per_lvl=num_points) - - # split to per img, per level - labels_list = [labels.split(num_points, 0) for labels in labels_list] - bbox_targets_list = [ - bbox_targets.split(num_points, 0) - for bbox_targets in bbox_targets_list - ] - - # concat per level image - concat_lvl_labels = [] - concat_lvl_bbox_targets = [] - for i in range(num_levels): - concat_lvl_labels.append( - torch.cat([labels[i] for labels in labels_list])) - bbox_targets = torch.cat( - [bbox_targets[i] for bbox_targets in bbox_targets_list]) - if self.norm_on_bbox: - bbox_targets = bbox_targets / self.strides[i] - concat_lvl_bbox_targets.append(bbox_targets) - return concat_lvl_labels, concat_lvl_bbox_targets - - def _get_targets_single( - self, gt_instances: InstanceData, points: Tensor, - regress_ranges: Tensor, - num_points_per_lvl: List[int]) -> Tuple[Tensor, Tensor]: - """Compute regression and classification targets for a single image.""" - num_points = points.size(0) - num_gts = len(gt_instances) - gt_bboxes = gt_instances.bboxes - gt_labels = gt_instances.labels - - if num_gts == 0: - return gt_labels.new_full((num_points,), self.num_classes), \ - gt_bboxes.new_zeros((num_points, 4)) - - areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( - gt_bboxes[:, 3] - gt_bboxes[:, 1]) - # TODO: figure out why these two are different - # areas = areas[None].expand(num_points, num_gts) - areas = areas[None].repeat(num_points, 1) - regress_ranges = regress_ranges[:, None, :].expand( - num_points, num_gts, 2) - gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) - xs, ys = points[:, 0], points[:, 1] - xs = xs[:, None].expand(num_points, num_gts) - ys = ys[:, None].expand(num_points, num_gts) - - left = xs - gt_bboxes[..., 0] - right = gt_bboxes[..., 2] - xs - top = ys - gt_bboxes[..., 1] - bottom = gt_bboxes[..., 3] - ys - bbox_targets = torch.stack((left, top, right, bottom), -1) - - if self.center_sampling: - # condition1: inside a `center bbox` - radius = self.center_sample_radius - center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 - center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2 - center_gts = torch.zeros_like(gt_bboxes) - stride = center_xs.new_zeros(center_xs.shape) - - # project the points on current lvl back to the `original` sizes - lvl_begin = 0 - for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): - lvl_end = lvl_begin + num_points_lvl - stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius - lvl_begin = lvl_end - - x_mins = center_xs - stride - y_mins = center_ys - stride - x_maxs = center_xs + stride - y_maxs = center_ys + stride - center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], - x_mins, gt_bboxes[..., 0]) - center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], - y_mins, gt_bboxes[..., 1]) - center_gts[..., 2] = torch.where(x_maxs > 
gt_bboxes[..., 2], - gt_bboxes[..., 2], x_maxs) - center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], - gt_bboxes[..., 3], y_maxs) - - cb_dist_left = xs - center_gts[..., 0] - cb_dist_right = center_gts[..., 2] - xs - cb_dist_top = ys - center_gts[..., 1] - cb_dist_bottom = center_gts[..., 3] - ys - center_bbox = torch.stack( - (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) - inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 - else: - # condition1: inside a gt bbox - inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 - - # condition2: limit the regression range for each location - max_regress_distance = bbox_targets.max(-1)[0] - inside_regress_range = ( - (max_regress_distance >= regress_ranges[..., 0]) - & (max_regress_distance <= regress_ranges[..., 1])) - - # if there are still more than one objects for a location, - # we choose the one with minimal area - areas[inside_gt_bbox_mask == 0] = INF - areas[inside_regress_range == 0] = INF - min_area, min_area_inds = areas.min(dim=1) - - labels = gt_labels[min_area_inds] - labels[min_area == INF] = self.num_classes # set as BG - bbox_targets = bbox_targets[range(num_points), min_area_inds] - - return labels, bbox_targets - - def centerness_target(self, pos_bbox_targets: Tensor) -> Tensor: - """Compute centerness targets. - - Args: - pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape - (num_pos, 4) - - Returns: - Tensor: Centerness target. - """ - # only calculate pos centerness targets, otherwise there may be nan - left_right = pos_bbox_targets[:, [0, 2]] - top_bottom = pos_bbox_targets[:, [1, 3]] - if len(left_right) == 0: - centerness_targets = left_right[..., 0] - else: - centerness_targets = ( - left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( - top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) - return torch.sqrt(centerness_targets) diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/gfl_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/gfl_head.py deleted file mode 100644 index 6d2947a894892575c7f86ba6725456e6571f7585..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/gfl_head.py +++ /dev/null @@ -1,667 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Optional, Sequence, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, Scale -from mmengine.config import ConfigDict -from mmengine.structures import InstanceData -from torch import Tensor - -from mmdet.registry import MODELS, TASK_UTILS -from mmdet.structures.bbox import bbox_overlaps -from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType, - OptInstanceList, reduce_mean) -from ..task_modules.prior_generators import anchor_inside_flags -from ..task_modules.samplers import PseudoSampler -from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply, - unmap) -from .anchor_head import AnchorHead - - -class Integral(nn.Module): - """A fixed layer for calculating integral result from distribution. - - This layer calculates the target location by :math: ``sum{P(y_i) * y_i}``, - P(y_i) denotes the softmax vector that represents the discrete distribution - y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max} - - Args: - reg_max (int): The maximal value of the discrete set. Defaults to 16. - You may want to reset it according to your new dataset or related - settings. 
- """ - - def __init__(self, reg_max: int = 16) -> None: - super().__init__() - self.reg_max = reg_max - self.register_buffer('project', - torch.linspace(0, self.reg_max, self.reg_max + 1)) - - def forward(self, x: Tensor) -> Tensor: - """Forward feature from the regression head to get integral result of - bounding box location. - - Args: - x (Tensor): Features of the regression head, shape (N, 4*(n+1)), - n is self.reg_max. - - Returns: - x (Tensor): Integral result of box locations, i.e., distance - offsets from the box center in four directions, shape (N, 4). - """ - x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) - x = F.linear(x, self.project.type_as(x)).reshape(-1, 4) - return x - - -@MODELS.register_module() -class GFLHead(AnchorHead): - """Generalized Focal Loss: Learning Qualified and Distributed Bounding - Boxes for Dense Object Detection. - - GFL head structure is similar with ATSS, however GFL uses - 1) joint representation for classification and localization quality, and - 2) flexible General distribution for bounding box locations, - which are supervised by - Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively - - https://arxiv.org/abs/2006.04388 - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - stacked_convs (int): Number of conv layers in cls and reg tower. - Defaults to 4. - conv_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct - and config conv layer. Defaults to None. - norm_cfg (:obj:`ConfigDict` or dict): dictionary to construct and - config norm layer. Default: dict(type='GN', num_groups=32, - requires_grad=True). - loss_qfl (:obj:`ConfigDict` or dict): Config of Quality Focal Loss - (QFL). - bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder. Defaults - to 'DistancePointBBoxCoder'. - reg_max (int): Max value of integral set :math: ``{0, ..., reg_max}`` - in QFL setting. Defaults to 16. - init_cfg (:obj:`ConfigDict` or dict or list[dict] or - list[:obj:`ConfigDict`]): Initialization config dict. 
- Example: - >>> self = GFLHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_quality_score, bbox_pred = self.forward(feats) - >>> assert len(cls_quality_score) == len(self.scales) - """ - - def __init__(self, - num_classes: int, - in_channels: int, - stacked_convs: int = 4, - conv_cfg: OptConfigType = None, - norm_cfg: ConfigType = dict( - type='GN', num_groups=32, requires_grad=True), - loss_dfl: ConfigType = dict( - type='DistributionFocalLoss', loss_weight=0.25), - bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'), - reg_max: int = 16, - init_cfg: MultiConfig = dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', - name='gfl_cls', - std=0.01, - bias_prob=0.01)), - **kwargs) -> None: - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.reg_max = reg_max - super().__init__( - num_classes=num_classes, - in_channels=in_channels, - bbox_coder=bbox_coder, - init_cfg=init_cfg, - **kwargs) - - if self.train_cfg: - self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) - if self.train_cfg.get('sampler', None) is not None: - self.sampler = TASK_UTILS.build( - self.train_cfg['sampler'], default_args=dict(context=self)) - else: - self.sampler = PseudoSampler(context=self) - - self.integral = Integral(self.reg_max) - self.loss_dfl = MODELS.build(loss_dfl) - - def _init_layers(self) -> None: - """Initialize layers of the head.""" - self.relu = nn.ReLU() - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - assert self.num_anchors == 1, 'anchor free version' - self.gfl_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.gfl_reg = nn.Conv2d( - self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1) - self.scales = nn.ModuleList( - [Scale(1.0) for _ in self.prior_generator.strides]) - - def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]: - """Forward features from the upstream network. - - Args: - x (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: Usually a tuple of classification scores and bbox prediction - - - cls_scores (list[Tensor]): Classification and quality (IoU) - joint scores for all scale levels, each is a 4D-tensor, - the channel number is num_classes. - - bbox_preds (list[Tensor]): Box distribution logits for all - scale levels, each is a 4D-tensor, the channel number is - 4*(n+1), n is max value of integral set. - """ - return multi_apply(self.forward_single, x, self.scales) - - def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]: - """Forward feature of a single scale level. - - Args: - x (Tensor): Features of a single scale level. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - - Returns: - tuple: - - - cls_score (Tensor): Cls and quality joint scores for a single - scale level the channel number is num_classes. - - bbox_pred (Tensor): Box distribution logits for a single scale - level, the channel number is 4*(n+1), n is max value of - integral set. 
- """ - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.gfl_cls(cls_feat) - bbox_pred = scale(self.gfl_reg(reg_feat)).float() - return cls_score, bbox_pred - - def anchor_center(self, anchors: Tensor) -> Tensor: - """Get anchor centers from anchors. - - Args: - anchors (Tensor): Anchor list with shape (N, 4), ``xyxy`` format. - - Returns: - Tensor: Anchor centers with shape (N, 2), ``xy`` format. - """ - anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2 - anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2 - return torch.stack([anchors_cx, anchors_cy], dim=-1) - - def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor, - bbox_pred: Tensor, labels: Tensor, - label_weights: Tensor, bbox_targets: Tensor, - stride: Tuple[int], avg_factor: int) -> dict: - """Calculate the loss of a single scale level based on the features - extracted by the detection head. - - Args: - anchors (Tensor): Box reference for each scale level with shape - (N, num_total_anchors, 4). - cls_score (Tensor): Cls and quality joint scores for each scale - level has shape (N, num_classes, H, W). - bbox_pred (Tensor): Box distribution logits for each scale - level with shape (N, 4*(n+1), H, W), n is max value of integral - set. - labels (Tensor): Labels of each anchors with shape - (N, num_total_anchors). - label_weights (Tensor): Label weights of each anchor with shape - (N, num_total_anchors) - bbox_targets (Tensor): BBox regression targets of each anchor - weight shape (N, num_total_anchors, 4). - stride (Tuple[int]): Stride in this scale level. - avg_factor (int): Average factor that is used to average - the loss. When using sampling method, avg_factor is usually - the sum of positive and negative priors. When using - `PseudoSampler`, `avg_factor` is usually equal to the number - of positive priors. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert stride[0] == stride[1], 'h stride is not equal to w stride!' 
- anchors = anchors.reshape(-1, 4) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - bbox_pred = bbox_pred.permute(0, 2, 3, - 1).reshape(-1, 4 * (self.reg_max + 1)) - bbox_targets = bbox_targets.reshape(-1, 4) - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((labels >= 0) - & (labels < bg_class_ind)).nonzero().squeeze(1) - score = label_weights.new_zeros(labels.shape) - - if len(pos_inds) > 0: - pos_bbox_targets = bbox_targets[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_anchors = anchors[pos_inds] - pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0] - - weight_targets = cls_score.detach().sigmoid() - weight_targets = weight_targets.max(dim=1)[0][pos_inds] - pos_bbox_pred_corners = self.integral(pos_bbox_pred) - pos_decode_bbox_pred = self.bbox_coder.decode( - pos_anchor_centers, pos_bbox_pred_corners) - pos_decode_bbox_targets = pos_bbox_targets / stride[0] - score[pos_inds] = bbox_overlaps( - pos_decode_bbox_pred.detach(), - pos_decode_bbox_targets, - is_aligned=True) - pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1) - target_corners = self.bbox_coder.encode(pos_anchor_centers, - pos_decode_bbox_targets, - self.reg_max).reshape(-1) - - # regression loss - loss_bbox = self.loss_bbox( - pos_decode_bbox_pred, - pos_decode_bbox_targets, - weight=weight_targets, - avg_factor=1.0) - - # dfl loss - loss_dfl = self.loss_dfl( - pred_corners, - target_corners, - weight=weight_targets[:, None].expand(-1, 4).reshape(-1), - avg_factor=4.0) - else: - loss_bbox = bbox_pred.sum() * 0 - loss_dfl = bbox_pred.sum() * 0 - weight_targets = bbox_pred.new_tensor(0) - - # cls (qfl) loss - loss_cls = self.loss_cls( - cls_score, (labels, score), - weight=label_weights, - avg_factor=avg_factor) - - return loss_cls, loss_bbox, loss_dfl, weight_targets.sum() - - def loss_by_feat( - self, - cls_scores: List[Tensor], - bbox_preds: List[Tensor], - batch_gt_instances: InstanceList, - batch_img_metas: List[dict], - batch_gt_instances_ignore: OptInstanceList = None) -> dict: - """Calculate the loss based on the features extracted by the detection - head. - - Args: - cls_scores (list[Tensor]): Cls and quality scores for each scale - level has shape (N, num_classes, H, W). - bbox_preds (list[Tensor]): Box distribution logits for each scale - level with shape (N, 4*(n+1), H, W), n is max value of integral - set. - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - batch_img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): - Batch of gt_instances_ignore. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. 
- """ - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.prior_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, batch_img_metas, device=device) - - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - batch_gt_instances, - batch_img_metas, - batch_gt_instances_ignore=batch_gt_instances_ignore) - - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, avg_factor) = cls_reg_targets - - avg_factor = reduce_mean( - torch.tensor(avg_factor, dtype=torch.float, device=device)).item() - - losses_cls, losses_bbox, losses_dfl,\ - avg_factor = multi_apply( - self.loss_by_feat_single, - anchor_list, - cls_scores, - bbox_preds, - labels_list, - label_weights_list, - bbox_targets_list, - self.prior_generator.strides, - avg_factor=avg_factor) - - avg_factor = sum(avg_factor) - avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() - losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) - losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) - return dict( - loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl) - - def _predict_by_feat_single(self, - cls_score_list: List[Tensor], - bbox_pred_list: List[Tensor], - score_factor_list: List[Tensor], - mlvl_priors: List[Tensor], - img_meta: dict, - cfg: ConfigDict, - rescale: bool = False, - with_nms: bool = True) -> InstanceData: - """Transform a single image's features extracted from the head into - bbox results. - - Args: - cls_score_list (list[Tensor]): Box scores from all scale - levels of a single image, each item has shape - (num_priors * num_classes, H, W). - bbox_pred_list (list[Tensor]): Box energies / deltas from - all scale levels of a single image, each item has shape - (num_priors * 4, H, W). - score_factor_list (list[Tensor]): Score factor from all scale - levels of a single image. GFL head does not need this value. - mlvl_priors (list[Tensor]): Each element in the list is - the priors of a single level in feature pyramid, has shape - (num_priors, 4). - img_meta (dict): Image meta info. - cfg (:obj: `ConfigDict`): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Defaults to False. - with_nms (bool): If True, do nms before return boxes. - Defaults to True. - - Returns: - tuple[Tensor]: Results of detected bboxes and labels. If with_nms - is False and mlvl_score_factor is None, return mlvl_bboxes and - mlvl_scores, else return mlvl_bboxes, mlvl_scores and - mlvl_score_factor. Usually with_nms is False is used for aug - test. If with_nms is True, then return the following format - - - det_bboxes (Tensor): Predicted bboxes with shape - [num_bboxes, 5], where the first 4 columns are bounding - box positions (tl_x, tl_y, br_x, br_y) and the 5-th - column are scores between 0 and 1. - - det_labels (Tensor): Predicted labels of the corresponding - box with shape [num_bboxes]. 
- """ - cfg = self.test_cfg if cfg is None else cfg - img_shape = img_meta['img_shape'] - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_labels = [] - for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate( - zip(cls_score_list, bbox_pred_list, - self.prior_generator.strides, mlvl_priors)): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - assert stride[0] == stride[1] - - bbox_pred = bbox_pred.permute(1, 2, 0) - bbox_pred = self.integral(bbox_pred) * stride[0] - - scores = cls_score.permute(1, 2, 0).reshape( - -1, self.cls_out_channels).sigmoid() - - # After https://github.com/open-mmlab/mmdetection/pull/6268/, - # this operation keeps fewer bboxes under the same `nms_pre`. - # There is no difference in performance for most models. If you - # find a slight drop in performance, you can set a larger - # `nms_pre` than before. - results = filter_scores_and_topk( - scores, cfg.score_thr, nms_pre, - dict(bbox_pred=bbox_pred, priors=priors)) - scores, labels, _, filtered_results = results - - bbox_pred = filtered_results['bbox_pred'] - priors = filtered_results['priors'] - - bboxes = self.bbox_coder.decode( - self.anchor_center(priors), bbox_pred, max_shape=img_shape) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_labels.append(labels) - - results = InstanceData() - results.bboxes = torch.cat(mlvl_bboxes) - results.scores = torch.cat(mlvl_scores) - results.labels = torch.cat(mlvl_labels) - - return self._bbox_post_process( - results=results, - cfg=cfg, - rescale=rescale, - with_nms=with_nms, - img_meta=img_meta) - - def get_targets(self, - anchor_list: List[Tensor], - valid_flag_list: List[Tensor], - batch_gt_instances: InstanceList, - batch_img_metas: List[dict], - batch_gt_instances_ignore: OptInstanceList = None, - unmap_outputs=True) -> tuple: - """Get targets for GFL head. - - This method is almost the same as `AnchorHead.get_targets()`. Besides - returning the targets as the parent method does, it also returns the - anchors as the first element of the returned tuple. - """ - num_imgs = len(batch_img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - num_level_anchors_list = [num_level_anchors] * num_imgs - - # concat all level anchors and flags to a single tensor - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - anchor_list[i] = torch.cat(anchor_list[i]) - valid_flag_list[i] = torch.cat(valid_flag_list[i]) - - # compute targets for each image - if batch_gt_instances_ignore is None: - batch_gt_instances_ignore = [None] * num_imgs - (all_anchors, all_labels, all_label_weights, all_bbox_targets, - all_bbox_weights, pos_inds_list, neg_inds_list, - sampling_results_list) = multi_apply( - self._get_targets_single, - anchor_list, - valid_flag_list, - num_level_anchors_list, - batch_gt_instances, - batch_img_metas, - batch_gt_instances_ignore, - unmap_outputs=unmap_outputs) - # Get `avg_factor` of all images, which calculate in `SamplingResult`. - # When using sampling method, avg_factor is usually the sum of - # positive and negative priors. When using `PseudoSampler`, - # `avg_factor` is usually equal to the number of positive priors. - avg_factor = sum( - [results.avg_factor for results in sampling_results_list]) - # split targets to a list w.r.t. 
multiple levels - anchors_list = images_to_levels(all_anchors, num_level_anchors) - labels_list = images_to_levels(all_labels, num_level_anchors) - label_weights_list = images_to_levels(all_label_weights, - num_level_anchors) - bbox_targets_list = images_to_levels(all_bbox_targets, - num_level_anchors) - bbox_weights_list = images_to_levels(all_bbox_weights, - num_level_anchors) - return (anchors_list, labels_list, label_weights_list, - bbox_targets_list, bbox_weights_list, avg_factor) - - def _get_targets_single(self, - flat_anchors: Tensor, - valid_flags: Tensor, - num_level_anchors: List[int], - gt_instances: InstanceData, - img_meta: dict, - gt_instances_ignore: Optional[InstanceData] = None, - unmap_outputs: bool = True) -> tuple: - """Compute regression, classification targets for anchors in a single - image. - - Args: - flat_anchors (Tensor): Multi-level anchors of the image, which are - concatenated into a single tensor of shape (num_anchors, 4) - valid_flags (Tensor): Multi level valid flags of the image, - which are concatenated into a single tensor of - shape (num_anchors,). - num_level_anchors (list[int]): Number of anchors of each scale - level. - gt_instances (:obj:`InstanceData`): Ground truth of instance - annotations. It usually includes ``bboxes`` and ``labels`` - attributes. - img_meta (dict): Meta information for current image. - gt_instances_ignore (:obj:`InstanceData`, optional): Instances - to be ignored during training. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. Defaults to True. - - Returns: - tuple: N is the number of total anchors in the image. - - - anchors (Tensor): All anchors in the image with shape (N, 4). - - labels (Tensor): Labels of all anchors in the image with - shape (N,). - - label_weights (Tensor): Label weights of all anchor in the - image with shape (N,). - - bbox_targets (Tensor): BBox targets of all anchors in the - image with shape (N, 4). - - bbox_weights (Tensor): BBox weights of all anchors in the - image with shape (N, 4). - - pos_inds (Tensor): Indices of positive anchor with shape - (num_pos,). - - neg_inds (Tensor): Indices of negative anchor with shape - (num_neg,). - - sampling_result (:obj:`SamplingResult`): Sampling results. - """ - inside_flags = anchor_inside_flags(flat_anchors, valid_flags, - img_meta['img_shape'][:2], - self.train_cfg['allowed_border']) - if not inside_flags.any(): - raise ValueError( - 'There is no valid anchor inside the image boundary. 
Please ' - 'check the image size and anchor sizes, or set ' - '``allowed_border`` to -1 to skip the condition.') - # assign gt and sample anchors - anchors = flat_anchors[inside_flags, :] - num_level_anchors_inside = self.get_num_level_anchors_inside( - num_level_anchors, inside_flags) - pred_instances = InstanceData(priors=anchors) - assign_result = self.assigner.assign( - pred_instances=pred_instances, - num_level_priors=num_level_anchors_inside, - gt_instances=gt_instances, - gt_instances_ignore=gt_instances_ignore) - - sampling_result = self.sampler.sample( - assign_result=assign_result, - pred_instances=pred_instances, - gt_instances=gt_instances) - - num_valid_anchors = anchors.shape[0] - bbox_targets = torch.zeros_like(anchors) - bbox_weights = torch.zeros_like(anchors) - labels = anchors.new_full((num_valid_anchors, ), - self.num_classes, - dtype=torch.long) - label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - pos_bbox_targets = sampling_result.pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1.0 - - labels[pos_inds] = sampling_result.pos_gt_labels - if self.train_cfg['pos_weight'] <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg['pos_weight'] - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_anchors.size(0) - anchors = unmap(anchors, num_total_anchors, inside_flags) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) - bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) - - return (anchors, labels, label_weights, bbox_targets, bbox_weights, - pos_inds, neg_inds, sampling_result) - - def get_num_level_anchors_inside(self, num_level_anchors: List[int], - inside_flags: Tensor) -> List[int]: - """Get the number of valid anchors in every level.""" - - split_inside_flags = torch.split(inside_flags, num_level_anchors) - num_level_anchors_inside = [ - int(flags.sum()) for flags in split_inside_flags - ] - return num_level_anchors_inside diff --git a/spaces/LUOYE-123/QQsign/devices/device_8950.js b/spaces/LUOYE-123/QQsign/devices/device_8950.js deleted file mode 100644 index fe1caad4a8c5eb07633510e1d8a890197056a211..0000000000000000000000000000000000000000 --- a/spaces/LUOYE-123/QQsign/devices/device_8950.js +++ /dev/null @@ -1,344 +0,0 @@ -"use strict"; -var __importDefault = (this && this.__importDefault) || function (mod) { - return (mod && mod.__esModule) ? 
mod : { "default": mod }; -}; -Object.defineProperty(exports, "__esModule", { value: true }); -exports.getApkInfo = exports.Platform = exports.Device = exports.generateFullDevice = exports.generateShortDevice = void 0; -const crypto_1 = require("crypto"); -const constants_1 = require("./constants"); -const axios_1 = __importDefault(require("axios")); -const algo_1 = require("./algo"); -function generateImei() { - let imei = `86${(0, constants_1.randomString)(12, '0123456789')}`; - function calcSP(imei) { - let sum = 0; - for (let i = 0; i < imei.length; ++i) { - if (i % 2) { - let j = parseInt(imei[i]) * 2; - sum += j % 10 + Math.floor(j / 10); - } - else { - sum += parseInt(imei[i]); - } - } - return (100 - sum) % 10; - } - return imei + calcSP(imei); -} -/** 生成短设备信息 */ -function generateShortDevice() { - const randstr = (length, num = false) => { - const map = num ? '0123456789' : '0123456789abcdef'; - return (0, constants_1.randomString)(length, map); - }; - return { - "--begin--": "该设备为随机生成,丢失后不能得到原先配置", - product: `ILPP-${randstr(5).toUpperCase()}`, - device: `${randstr(5).toUpperCase()}`, - board: `${randstr(5).toUpperCase()}`, - brand: `${randstr(4).toUpperCase()}`, - model: `ICQQ ${randstr(4).toUpperCase()}`, - wifi_ssid: `HUAWEI-${randstr(7)}`, - bootloader: `U-boot`, - android_id: `IL.${randstr(7, true)}.${randstr(4, true)}`, - boot_id: `${randstr(8)}-${randstr(4)}-${randstr(4)}-${randstr(4)}-${randstr(12)}`, - proc_version: `Linux version 5.10.101-android12-${randstr(8)}`, - mac_address: `2D:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}:${randstr(2).toUpperCase()}`, - ip_address: `192.168.${randstr(2, true)}.${randstr(2, true)}`, - imei: `${generateImei()}`, - incremental: `${randstr(10, true).toUpperCase()}`, - "--end--": "修改后可能需要重新验证设备。" - }; -} -exports.generateShortDevice = generateShortDevice; -/** 生成完整设备信息 */ -function generateFullDevice(apk, d) { - if (!d) - d = generateShortDevice(); - return { - display: d.android_id, - product: d.product, - device: d.device, - board: d.board, - brand: d.brand, - model: d.model, - bootloader: d.bootloader, - fingerprint: `${d.brand}/${d.product}/${d.device}:10/${d.android_id}/${d.incremental}:user/release-keys`, - boot_id: d.boot_id, - proc_version: d.proc_version, - baseband: "", - sim: "T-Mobile", - os_type: "android", - mac_address: d.mac_address, - ip_address: d.ip_address, - wifi_bssid: d.mac_address, - wifi_ssid: d.wifi_ssid, - imei: d.imei, - android_id: (0, constants_1.md5)(d.android_id).toString("hex"), - apn: "wifi", - version: { - incremental: d.incremental, - release: "10", - codename: "REL", - sdk: 29, - }, - imsi: (0, crypto_1.randomBytes)(16), - guid: (0, constants_1.md5)(Buffer.concat([Buffer.from(d.imei), Buffer.from(d.mac_address)])), - }; -} -exports.generateFullDevice = generateFullDevice; -class Device { - constructor(apk, d) { - this.apk = apk; - this.secret = 'ZdJqM15EeO2zWc08'; - this.publicKey = `-----BEGIN PUBLIC KEY----- -MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDEIxgwoutfwoJxcGQeedgP7FG9 -qaIuS0qzfR8gWkrkTZKM2iWHn2ajQpBRZjMSoSf6+KJGvar2ORhBfpDXyVtZCKpq -LQ+FLkpncClKVIrBwv6PHyUvuCb0rIarmgDnzkfQAqVufEtR64iazGDKatvJ9y6B -9NMbHddGSAUmRTCrHQIDAQAB ------END PUBLIC KEY-----`; - if (!d) - d = generateShortDevice(); - Object.assign(this, generateFullDevice(apk, d)); - } - async getQIMEI() { - if (this.apk.app_key === "") { - return; - } - const k = (0, constants_1.randomString)(16); - const key = (0, algo_1.encryptPKCS1)(this.publicKey, k); - const time = 
Date.now(); - const nonce = (0, constants_1.randomString)(16); - const payload = this.genRandomPayloadByDevice(); - const params = (0, algo_1.aesEncrypt)(JSON.stringify(payload), k).toString('base64'); - try { - const { data } = await axios_1.default.post("https://snowflake.qq.com/ola/android", { - key, - params, - time, nonce, - sign: (0, constants_1.md5)(key + params + time + nonce + this.secret).toString("hex"), - extra: '' - }, { - headers: { - 'User-Agent': `Dalvik/2.1.0 (Linux; U; Android ${this.version.release}; PCRT00 Build/N2G48H)`, - 'Content-Type': "application/json" - } - }); - if (data?.code !== 0) { - return; - } - const { q16, q36 } = JSON.parse((0, algo_1.aesDecrypt)(data.data, k)); - this.qImei16 = q16; - this.qImei36 = q36; - } - catch { - } - } - genRandomPayloadByDevice() { - const fixedRand = (max = 1, min = 0) => { - if (max < min) - [max, min] = [min, max]; - const diff = max - min; - return Math.floor(Math.random() * diff) + min; - }; - const reserved = { - "harmony": "0", - "clone": Math.random() > 0.5 ? "1" : "0", - "containe": "", - "oz": "", - "oo": "", - "kelong": Math.random() > 0.5 ? "1" : "0", - "uptimes": (0, constants_1.formatTime)(new Date()), - "multiUser": Math.random() > 0.5 ? "1" : "0", - "bod": this.board, - "brd": this.brand, - "dv": this.device, - "firstLevel": "", - "manufact": this.brand, - "name": this.model, - "host": "se.infra", - "kernel": this.fingerprint - }; - const timestamp = Date.now(); - this.mtime = this.mtime || Date.now(); - const mtime1 = new Date(this.mtime || Date.now()); - const dateFormat = (fmt, time = Date.now()) => (0, constants_1.formatTime)(time, fmt); - const mtimeStr1 = dateFormat("YYYY-mm-ddHHMMSS", mtime1) + "." + this.imei.slice(2, 11); - const mtime2 = new Date(this.mtime - parseInt(this.imei.slice(2, 4))); - const mtimeStr2 = dateFormat("YYYY-mm-ddHHMMSS", mtime2) + "." 
+ this.imei.slice(5, 14); - let beaconIdArr = [ - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - mtimeStr1, - '0000000000000000', - (0, constants_1.md5)(this.android_id + this.imei).toString("hex").slice(0, 16), - ...new Array(4).fill(false).map((_) => fixedRand(10000000, 1000000)), - this.boot_id, - '1', - fixedRand(5, 0), - fixedRand(5, 0), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(50000, 10000), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - mtimeStr2, - fixedRand(10000, 1000), - fixedRand(5, 0), - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((10 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - fixedRand(10000, 1000), - fixedRand(100, 10), - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - `${dateFormat("YYYY-mm-ddHHMMSS")}.${String(((11 + parseInt(this.imei.slice(5, 7))) % 100)).padStart(2, "0")}0000000`, - fixedRand(10000, 1000), - fixedRand(5, 0), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(100, 10), - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - `${(0, constants_1.formatTime)(new Date(timestamp + fixedRand(60, 0)))}.${String(fixedRand(99, 0)).padStart(2, '0')}0000000`, - fixedRand(5, 0), - fixedRand(5, 0), - ].map((str, idx) => `k${idx + 1}:${str}`); - return { - "androidId": this.android_id, - "platformId": 1, - "appKey": this.apk.app_key, - "appVersion": this.apk.version, - "beaconIdSrc": beaconIdArr.join(';'), - "brand": this.brand, - "channelId": "2017", - "cid": "", - "imei": this.imei, - "imsi": this.imsi.toString("hex"), - "mac": this.mac_address, - "model": this.model, - "networkType": "unknown", - "oaid": "", - "osVersion": `Android ${this.version.release},level ${this.version.sdk}`, - "qimei": "", - "qimei36": "", - "sdkVersion": "1.2.13.6", - "targetSdkVersion": "26", - "audit": "", - "userId": "{}", - "packageId": this.apk.id, - "deviceType": this.display, - "sdkName": "", - "reserved": JSON.stringify(reserved), - }; - } -} -exports.Device = Device; -/** 支持的登录设备平台 */ -var Platform; -(function (Platform) { - Platform[Platform["Android"] = 1] = "Android"; - Platform[Platform["aPad"] = 2] = "aPad"; - Platform[Platform["Watch"] = 3] = "Watch"; - Platform[Platform["iMac"] = 4] = "iMac"; - Platform[Platform["iPad"] = 5] = "iPad"; - Platform[Platform["Tim"] = 6] = "Tim"; -})(Platform || (exports.Platform = Platform = {})); -const mobile = { - id: 
"com.tencent.mobileqq", - app_key: '0S200MNJT807V3GE', - name: "A8.9.50.f5a7d351", - version: "8.9.50.10650", - ver: "8.9.50", - sign: Buffer.from('A6 B7 45 BF 24 A2 C2 77 52 77 16 F6 F3 6E B6 8D'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1676531414, - appid: 16, - subid: 537155547, - bitmap: 150470524, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2535", - display: "Android", - qua: 'V1_AND_SQ_8.9.50_3898_YYB_D', - ssover: 19, -}; -const tim = { - id: "com.tencent.tim", - app_key: '0S200MNJT807V3GE', - name: "A3.5.1.3168", - version: "3.5.1.3168", - ver: "3.5.1", - sign: Buffer.from('775e696d09856872fdd8ab4f3f06b1e0', 'hex'), - buildtime: 1630062176, - appid: 16, - subid: 537150355, - bitmap: 150470524, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2484", - display: "Tim", - qua: "V1_AND_SQ_8.3.9_351_TIM_D", - ssover: 18, -}; -const watch = { - id: "com.tencent.qqlite", - app_key: '0S200MNJT807V3GE', - name: "A2.0.8", - version: "2.0.8", - ver: "2.0.8", - sign: Buffer.from('A6 B7 45 BF 24 A2 C2 77 52 77 16 F6 F3 6E B6 8D'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1559564731, - appid: 16, - subid: 537065138, - bitmap: 16252796, - main_sig_map: 16724722, - sub_sig_map: 0x10400, - sdkver: "6.0.0.2365", - display: "Watch", - qua: '', - ssover: 5 -}; -const hd = { - id: "com.tencent.minihd.qq", - app_key: '0S200MNJT807V3GE', - name: "A5.9.3.3468", - version: "5.9.3.3468", - ver: "5.9.3", - sign: Buffer.from('AA 39 78 F4 1F D9 6F F9 91 4A 66 9E 18 64 74 C7'.split(' ').map(s => parseInt(s, 16))), - buildtime: 1637427966, - appid: 16, - subid: 537128930, - bitmap: 150470524, - main_sig_map: 1970400, - sub_sig_map: 66560, - sdkver: "6.0.0.2433", - display: "iMac", - qua: '', - ssover: 12 -}; -const apklist = { - [Platform.Android]: mobile, - [Platform.Tim]: tim, - [Platform.aPad]: { - ...mobile, - subid: 537155599, - display: 'aPad' - }, - [Platform.Watch]: watch, - [Platform.iMac]: { ...hd }, - [Platform.iPad]: { - ...mobile, - subid: 537155074, - sign: hd.sign, - name: 'A8.9.50.611', - version: 'A8.9.50.611', - sdkver: '6.0.0.2535', - qua: 'V1_AND_SQ_8.9.50_3898_YYB_D', - display: 'iPad' - }, -}; -function getApkInfo(p) { - return apklist[p] || apklist[Platform.Android]; -} -exports.getApkInfo = getApkInfo; diff --git a/spaces/LZRi/LZR-Bert-VITS2/train_ms.py b/spaces/LZRi/LZR-Bert-VITS2/train_ms.py deleted file mode 100644 index 5d109003d40497ea4493e7c73f47c1eb7370a81e..0000000000000000000000000000000000000000 --- a/spaces/LZRi/LZR-Bert-VITS2/train_ms.py +++ /dev/null @@ -1,402 +0,0 @@ -import os -import json -import argparse -import itertools -import math -import torch -import shutil -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.multiprocessing as mp -import torch.distributed as dist -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.cuda.amp import autocast, GradScaler -from tqdm import tqdm -import logging -logging.getLogger('numba').setLevel(logging.WARNING) -import commons -import utils -from data_utils import ( - TextAudioSpeakerLoader, - TextAudioSpeakerCollate, - DistributedBucketSampler -) -from models import ( - SynthesizerTrn, - MultiPeriodDiscriminator, - DurationDiscriminator, -) -from losses import ( - generator_loss, - discriminator_loss, - feature_loss, - kl_loss -) -from mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from text.symbols import symbols - 
-torch.backends.cudnn.benchmark = True -torch.backends.cuda.matmul.allow_tf32 = True -torch.backends.cudnn.allow_tf32 = True -torch.set_float32_matmul_precision('medium') -global_step = 0 - - -def main(): - """Assume Single Node Multi GPUs Training Only""" - assert torch.cuda.is_available(), "CPU training is not allowed." - - n_gpus = torch.cuda.device_count() - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '65280' - - hps = utils.get_hparams() - if not hps.cont: - shutil.copy('./pretrained_models/D_0.pth','./logs/OUTPUT_MODEL/D_0.pth') - shutil.copy('./pretrained_models/G_0.pth','./logs/OUTPUT_MODEL/G_0.pth') - shutil.copy('./pretrained_models/DUR_0.pth','./logs/OUTPUT_MODEL/DUR_0.pth') - mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,)) - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) - torch.manual_seed(hps.train.seed) - torch.cuda.set_device(rank) - - train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size, - [32, 300, 400, 500, 600, 700, 800, 900, 1000], - num_replicas=n_gpus, - rank=rank, - shuffle=True) - collate_fn = TextAudioSpeakerCollate() - train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, - collate_fn=collate_fn, batch_sampler=train_sampler) - if rank == 0: - eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) - eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, - batch_size=1, pin_memory=True, - drop_last=False, collate_fn=collate_fn) - if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: - print("Using noise scaled MAS for VITS2") - use_noise_scaled_mas = True - mas_noise_scale_initial = 0.01 - noise_scale_delta = 2e-6 - else: - print("Using normal MAS for VITS1") - use_noise_scaled_mas = False - mas_noise_scale_initial = 0.0 - noise_scale_delta = 0.0 - if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: - print("Using duration discriminator for VITS2") - use_duration_discriminator = True - net_dur_disc = DurationDiscriminator( - hps.model.hidden_channels, - hps.model.hidden_channels, - 3, - 0.1, - gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, - ).cuda(rank) - if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: - if hps.data.n_speakers == 0: - raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") - use_spk_conditioned_encoder = True - else: - print("Using normal encoder for VITS1") - use_spk_conditioned_encoder = False - - net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - mas_noise_scale_initial = mas_noise_scale_initial, - noise_scale_delta = noise_scale_delta, - **hps.model).cuda(rank) - - freeze_enc = getattr(hps.model, "freeze_enc", False) - if freeze_enc: - print("freeze encoder !!!") - for param in net_g.enc_p.parameters(): - param.requires_grad = False - - net_d = 
MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) - optim_g = torch.optim.AdamW( - filter(lambda p: p.requires_grad, net_g.parameters()), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - if net_dur_disc is not None: - optim_dur_disc = torch.optim.AdamW( - net_dur_disc.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps) - else: - optim_dur_disc = None - net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if net_dur_disc is not None: - net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) - - pretrain_dir = None - if pretrain_dir is None: - try: - if net_dur_disc is not None: - _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) - _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, - optim_g, skip_optimizer=not hps.cont) - _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, - optim_d, skip_optimizer=not hps.cont) - - epoch_str = max(epoch_str, 1) - global_step = (epoch_str - 1) * len(train_loader) - except Exception as e: - print(e) - epoch_str = 1 - global_step = 0 - else: - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, - optim_g, True) - _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, - optim_d, True) - - - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) - if net_dur_disc is not None: - scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) - else: - scheduler_dur_disc = None - scaler = GradScaler(enabled=hps.train.fp16_run) - - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval]) - else: - train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None) - scheduler_g.step() - scheduler_d.step() - if net_dur_disc is not None: - scheduler_dur_disc.step() - - -def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers): - net_g, net_d, net_dur_disc = nets - optim_g, optim_d, optim_dur_disc = optims - scheduler_g, scheduler_d, scheduler_dur_disc = schedulers - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - if net_dur_disc is not None: - net_dur_disc.train() - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): - if net_g.module.use_noise_scaled_mas: - 
current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step - net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) - x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) - spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) - y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) - speakers = speakers.cuda(rank, non_blocking=True) - tone = tone.cuda(rank, non_blocking=True) - language = language.cuda(rank, non_blocking=True) - bert = bert.cuda(rank, non_blocking=True) - - with autocast(enabled=hps.train.fp16_run): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - - y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) - loss_disc_all = loss_disc - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) - with autocast(enabled=False): - # TODO: I think need to mean using the mask, but for now, just mean all - loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) - loss_dur_disc_all = loss_dur_disc - optim_dur_disc.zero_grad() - scaler.scale(loss_dur_disc_all).backward() - scaler.unscale_(optim_dur_disc) - grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) - scaler.step(optim_dur_disc) - - optim_d.zero_grad() - scaler.scale(loss_disc_all).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) - if net_dur_disc is not None: - y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - if net_dur_disc is not None: - loss_dur_gen, losses_dur_gen = generator_loss(y_dur_hat_g) - loss_gen_all += loss_dur_gen - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]['lr'] - losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_dur, 
loss_kl] - logger.info('Train Epoch: {} [{:.0f}%]'.format( - epoch, - 100. * batch_idx / len(train_loader))) - logger.info([x.item() for x in losses] + [global_step, lr]) - - scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr, - "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g} - scalar_dict.update( - {"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/dur": loss_dur, "loss/g/kl": loss_kl}) - scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)}) - scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)}) - scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)}) - - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()), - "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()), - "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()), - "all/attn": utils.plot_alignment_to_numpy(attn[0, 0].data.cpu().numpy()) - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict) - - if global_step % hps.train.eval_interval == 0: - evaluate(hps, net_g, eval_loader, writer_eval) - utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step))) - utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step))) - if net_dur_disc is not None: - utils.save_checkpoint(net_dur_disc, optim_dur_disc, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "DUR_{}.pth".format(global_step))) - keep_ckpts = getattr(hps.train, 'keep_ckpts', 5) - if keep_ckpts > 0: - utils.clean_checkpoints(path_to_models=hps.model_dir, n_ckpts_to_keep=keep_ckpts, sort_by_time=True) - - - global_step += 1 - - if rank == 0: - logger.info('====> Epoch: {}'.format(epoch)) - - - -def evaluate(hps, generator, eval_loader, writer_eval): - generator.eval() - image_dict = {} - audio_dict = {} - print("Evaluating ...") - with torch.no_grad(): - for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in enumerate(eval_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - spec, spec_lengths = spec.cuda(), spec_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - speakers = speakers.cuda() - bert = bert.cuda() - tone = tone.cuda() - language = language.cuda() - for use_sdp in [True, False]: - y_hat, attn, mask, *_ = generator.module.infer(x, x_lengths, speakers, tone, language, bert, y=spec, max_len=1000, sdp_ratio=0.0 if not use_sdp else 1.0) - y_hat_lengths = mask.sum([1, 2]).long() * hps.data.hop_length - - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax) - y_hat_mel = mel_spectrogram_torch( - y_hat.squeeze(1).float(), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax - ) - image_dict.update({ - f"gen/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()) - }) - audio_dict.update({ - f"gen/audio_{batch_idx}_{use_sdp}": y_hat[0, :, :y_hat_lengths[0]] - }) - image_dict.update({f"gt/mel_{batch_idx}": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())}) - audio_dict.update({f"gt/audio_{batch_idx}": y[0, :, :y_lengths[0]]}) - - utils.summarize( - 
writer=writer_eval, - global_step=global_step, - images=image_dict, - audios=audio_dict, - audio_sampling_rate=hps.data.sampling_rate - ) - generator.train() - -if __name__ == "__main__": - main() diff --git a/spaces/Lamai/LAMAIGPT/autogpt/config/ai_config.py b/spaces/Lamai/LAMAIGPT/autogpt/config/ai_config.py deleted file mode 100644 index d50c30beee9dc8009f63415378ae1c6a399f0037..0000000000000000000000000000000000000000 --- a/spaces/Lamai/LAMAIGPT/autogpt/config/ai_config.py +++ /dev/null @@ -1,121 +0,0 @@ -# sourcery skip: do-not-use-staticmethod -""" -A module that contains the AIConfig class object that contains the configuration -""" -from __future__ import annotations - -import os -from typing import Type - -import yaml - - -class AIConfig: - """ - A class object that contains the configuration information for the AI - - Attributes: - ai_name (str): The name of the AI. - ai_role (str): The description of the AI's role. - ai_goals (list): The list of objectives the AI is supposed to complete. - """ - - def __init__( - self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None - ) -> None: - """ - Initialize a class instance - - Parameters: - ai_name (str): The name of the AI. - ai_role (str): The description of the AI's role. - ai_goals (list): The list of objectives the AI is supposed to complete. - Returns: - None - """ - if ai_goals is None: - ai_goals = [] - self.ai_name = ai_name - self.ai_role = ai_role - self.ai_goals = ai_goals - - # Soon this will go in a folder where it remembers more stuff about the run(s) - SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml") - - @staticmethod - def load(config_file: str = SAVE_FILE) -> "AIConfig": - """ - Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from - yaml file if yaml file exists, - else returns class with no parameters. - - Parameters: - config_file (int): The path to the config yaml file. - DEFAULT: "../ai_settings.yaml" - - Returns: - cls (object): An instance of given cls object - """ - - try: - with open(config_file, encoding="utf-8") as file: - config_params = yaml.load(file, Loader=yaml.FullLoader) - except FileNotFoundError: - config_params = {} - - ai_name = config_params.get("ai_name", "") - ai_role = config_params.get("ai_role", "") - ai_goals = config_params.get("ai_goals", []) - # type: Type[AIConfig] - return AIConfig(ai_name, ai_role, ai_goals) - - def save(self, config_file: str = SAVE_FILE) -> None: - """ - Saves the class parameters to the specified file yaml file path as a yaml file. - - Parameters: - config_file(str): The path to the config yaml file. - DEFAULT: "../ai_settings.yaml" - - Returns: - None - """ - - config = { - "ai_name": self.ai_name, - "ai_role": self.ai_role, - "ai_goals": self.ai_goals, - } - with open(config_file, "w", encoding="utf-8") as file: - yaml.dump(config, file, allow_unicode=True) - - def construct_full_prompt(self) -> str: - """ - Returns a prompt to the user with the class information in an organized fashion. - - Parameters: - None - - Returns: - full_prompt (str): A string containing the initial prompt for the user - including the ai_name, ai_role and ai_goals. - """ - - prompt_start = ( - "Your decisions must always be made independently without" - " seeking user assistance. Play to your strengths as an LLM and pursue" - " simple strategies with no legal complications." 
- "" - ) - - from autogpt.prompt import get_prompt - - # Construct full prompt - full_prompt = ( - f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n" - ) - for i, goal in enumerate(self.ai_goals): - full_prompt += f"{i+1}. {goal}\n" - - full_prompt += f"\n\n{get_prompt()}" - return full_prompt diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-applio.bat b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-applio.bat deleted file mode 100644 index 70cc1bea97c811535eb36665c4a57acfe788dde4..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-applio.bat +++ /dev/null @@ -1,100 +0,0 @@ -@echo off -setlocal -title Applio - Start -cd %~dp0 - -::: -::: _ _ -::: /\ | (_) -::: / \ _ __ _ __ | |_ ___ -::: / /\ \ | '_ \| '_ \| | |/ _ \ -::: / ____ \| |_) | |_) | | | (_) | -::: /_/ \_\ .__/| .__/|_|_|\___/ -::: | | | | -::: |_| |_| -::: -::: - -for /f "usebackq delims=" %%i in ("%cd%\assets\configs\version.txt") do ( - set "localVersion=%%i" -) -for /f %%i in ('powershell -command "(Invoke-WebRequest -Uri 'https://raw.githubusercontent.com/IAHispano/Applio-RVC-Fork/main/assets/configs/version.txt').Content"') do set "onlineVersion=%%i" - -:menu -for /f "delims=: tokens=*" %%A in ('findstr /b ":::" "%~f0"') do @echo(%%A -powershell -command "if ('%localVersion%' -lt '%onlineVersion%') { exit 1 } else { exit 0 }" -if %errorlevel% equ 1 ( - echo You are currently using an outdated version %localVersion% - echo. - echo We're excited to announce that version %onlineVersion% is now available for download on https://github.com/IAHispano/Applio-RVC-Fork. - echo Upgrade now to access the latest features and improvements! - echo. - goto continue -) else ( - goto continue -) - -:continue -echo Runtime: Recommended for regular users -echo [1] Start Applio - Runtime ^(Nvidia Support^) -echo [2] Start Applio - Runtime ^(Intel Support. Requires Nvidia runtime^) -echo [3] Start Applio - Runtime ^(AMD Support^) -echo. -echo Dependencies: Only recommended for experienced users -echo [4] Start Applio ^(Nvidia Support^) -echo [5] Start Applio ^(AMD Support^) -echo. -echo [6] Exit -echo. - -set /p choice=Select an option: -set choice=%choice: =% - -if "%choice%"=="6" ( - goto finish -) else if "%choice%"=="5" ( - cls - echo Starting Applio with AMD support... - python infer-web.py --pycmd python --port 7897 --dml --theme dark - pause - cls - goto menu -) else if "%choice%"=="4" ( - cls - echo Starting Applio with Nvidia support... - python infer-web.py --pycmd python --port 7897 --theme dark - pause - cls - goto menu -) else if "%choice%"=="3" ( - cls - echo Starting Applio with runtime for AMD support ^(you must have it installed^)... - runtime\python.exe infer-web.py --pycmd runtime/python.exe --port 7897 --dml --theme dark - pause - cls - goto menu -) else if "%choice%"=="2" ( - runtime\python.exe -m pip install scikit-learn-intelex - cls - echo Starting Applio with runtime for Intel CPU support ^(you must have Nvidia support installed^)... - runtime\python.exe -m sklearnex infer-web.py --pycmd runtime/python.exe --port 7897 --theme dark - pause - cls - goto menu -) else if "%choice%"=="1" ( - cls - echo Starting Applio with runtime for Nvidia support ^(you must have it installed^)... - runtime\python.exe infer-web.py --pycmd runtime/python.exe --port 7897 --theme dark - pause - cls - goto menu -) - -cls -echo Invalid option. Please enter a number from 1 to 5. -echo. -echo Press 'Enter' to access the main menu... 
-pause>nul -cls -goto menu -:finish diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-tensorboard.bat b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-tensorboard.bat deleted file mode 100644 index 631402eac66b7f9c39d803e6a280aa50dd3884b9..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/go-tensorboard.bat +++ /dev/null @@ -1,5 +0,0 @@ -title Applio - Tensorboard -cd %~dp0 -cls -python lib/fixes/tensor-launch.py -pause diff --git a/spaces/MLVKU/Human_Object_Interaction/README.md b/spaces/MLVKU/Human_Object_Interaction/README.md deleted file mode 100644 index d6ac37bd187c96985e9f12343c4c83adf360c7d6..0000000000000000000000000000000000000000 --- a/spaces/MLVKU/Human_Object_Interaction/README.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: HOI detection (HOTR_CPC) -emoji: ⚡ -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.3 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# CPC_HOTR - -This repository contains the application of [Cross-Path Consistency Learning](https://arxiv.org/abs/2204.04836) at [HOTR](https://arxiv.org/abs/2104.13682), based on the official implementation of HOTR in [here](https://github.com/kakaobrain/HOTR). - -
- -
- - -## 1. Environmental Setup -```bash -$ conda create -n HOTR_CPC python=3.7 -$ conda install -c pytorch pytorch torchvision # PyTorch 1.7.1, torchvision 0.8.2, CUDA=11.0 -$ conda install cython scipy -$ pip install pycocotools -$ pip install opencv-python -$ pip install wandb -``` - -## 2. HOI dataset setup -Our current version of HOTR supports the experiments for both [V-COCO](https://github.com/s-gupta/v-coco) and [HICO-DET](https://drive.google.com/file/d/1QZcJmGVlF9f4h-XLWe9Gkmnmj2z1gSnk/view) dataset. -Download the dataset under the pulled directory. -For HICO-DET, we use the [annotation files](https://drive.google.com/file/d/1QZcJmGVlF9f4h-XLWe9Gkmnmj2z1gSnk/view) provided by the PPDM authors. -Download the [list of actions](https://drive.google.com/open?id=1EeHNHuYyJI-qqDk_-5nay7Mb07tzZLsl) as `list_action.txt` and place them under the unballed hico-det directory. -Below we present how you should place the files. -```bash -# V-COCO setup -$ git clone https://github.com/s-gupta/v-coco.git -$ cd v-coco -$ ln -s [:COCO_DIR] coco/images # COCO_DIR contains images of train2014 & val2014 -$ python script_pick_annotations.py [:COCO_DIR]/annotations - -# HICO-DET setup -$ tar -zxvf hico_20160224_det.tar.gz # move the unballed folder under the pulled repository - -# dataset setup -HOTR - │─ v-coco - │ │─ data - │ │ │─ instances_vcoco_all_2014.json - │ │ : - │ └─ coco - │ │─ images - │ │ │─ train2014 - │ │ │ │─ COCO_train2014_000000000009.jpg - │ │ │ : - │ │ └─ val2014 - │ │ │─ COCO_val2014_000000000042.jpg - : : : - │─ hico_20160224_det - │ │─ list_action.txt - │ │─ annotations - │ │ │─ trainval_hico.json - │ │ │─ test_hico.json - │ │ └─ corre_hico.npy - : : -``` - -If you wish to download the datasets on our own directory, simply change the 'data_path' argument to the directory you have downloaded the datasets. -```bash ---data_path [:your_own_directory]/[v-coco/hico_20160224_det] -``` - -## 3. Training -After the preparation, you can start the training with the following command. - -For the HICO-DET training. -``` -GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 8 ./configs/hico_train.sh -``` -For the V-COCO training. -``` -GPUS_PER_NODE=8 ./tools/run_dist_launch.sh 8 ./configs/vcoco_train.sh -``` - -## 4. Evaluation -For evaluation of main inference path P1 (x->HOI), `--path_id` should be set to 0. -Indexes of Augmented paths are range to 1~3. (1: x->HO->I, 2: x->HI->O, 3: x->OI->H) - -HICODET -``` -python -m torch.distributed.launch \ - --nproc_per_node=8 \ - --use_env main.py \ - --batch_size 2 \ - --HOIDet \ - --path_id 0 \ - --share_enc \ - --pretrained_dec \ - --share_dec_param \ - --num_hoi_queries [:query_num] \ - --object_threshold 0 \ - --temperature 0.2 \ # use the exact same temperature value that you used during training! - --no_aux_loss \ - --eval \ - --dataset_file hico-det \ - --data_path hico_20160224_det \ - --resume checkpoints/hico_det/hico_[:query_num].pth -``` - -VCOCO -``` -python -m torch.distributed.launch \ - --nproc_per_node=8 \ - --use_env main.py \ - --batch_size 2 \ - --HOIDet \ - --path_id 0 \ - --share_enc \ - --share_dec_param \ - --pretrained_dec \ - --num_hoi_queries [:query_num] \ - --temperature 0.05 \ # use the exact same temperature value that you used during training! 
- --object_threshold 0 \ - --no_aux_loss \ - --eval \ - --dataset_file vcoco \ - --data_path v-coco \ - --resume checkpoints/vcoco/vcoco_[:query_num].pth -``` - -## Citation -``` -@inproceedings{park2022consistency, - title={Consistency Learning via Decoding Path Augmentation for Transformers in Human Object Interaction Detection}, - author={Park, Jihwan and Lee, SeungJun and Heo, Hwan and Choi, Hyeong Kyu and Kim, Hyunwoo J}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - year={2022} -} -``` diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/automatic_mask_generator.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/automatic_mask_generator.py deleted file mode 100644 index 23264971b7ff5aa0b4f499ade7773b68dce984b6..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/segment_anything/segment_anything/automatic_mask_generator.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch -from torchvision.ops.boxes import batched_nms, box_area # type: ignore - -from typing import Any, Dict, List, Optional, Tuple - -from .modeling import Sam -from .predictor import SamPredictor -from .utils.amg import ( - MaskData, - area_from_rle, - batch_iterator, - batched_mask_to_box, - box_xyxy_to_xywh, - build_all_layer_point_grids, - calculate_stability_score, - coco_encode_rle, - generate_crop_boxes, - is_box_near_crop_edge, - mask_to_rle_pytorch, - remove_small_regions, - rle_to_mask, - uncrop_boxes_xyxy, - uncrop_masks, - uncrop_points, -) - - -class SamAutomaticMaskGenerator: - def __init__( - self, - model: Sam, - points_per_side: Optional[int] = 32, - points_per_batch: int = 64, - pred_iou_thresh: float = 0.88, - stability_score_thresh: float = 0.95, - stability_score_offset: float = 1.0, - box_nms_thresh: float = 0.7, - crop_n_layers: int = 0, - crop_nms_thresh: float = 0.7, - crop_overlap_ratio: float = 512 / 1500, - crop_n_points_downscale_factor: int = 1, - point_grids: Optional[List[np.ndarray]] = None, - min_mask_region_area: int = 0, - output_mode: str = "binary_mask", - ) -> None: - """ - Using a SAM model, generates masks for the entire image. - Generates a grid of point prompts over the image, then filters - low quality and duplicate masks. The default settings are chosen - for SAM with a ViT-H backbone. - - Arguments: - model (Sam): The SAM model to use for mask prediction. - points_per_side (int or None): The number of points to be sampled - along one side of the image. The total number of points is - points_per_side**2. If None, 'point_grids' must provide explicit - point sampling. - points_per_batch (int): Sets the number of points run simultaneously - by the model. Higher numbers may be faster but use more GPU memory. - pred_iou_thresh (float): A filtering threshold in [0,1], using the - model's predicted mask quality. - stability_score_thresh (float): A filtering threshold in [0,1], using - the stability of the mask under changes to the cutoff used to binarize - the model's mask predictions. 
- stability_score_offset (float): The amount to shift the cutoff when - calculated the stability score. - box_nms_thresh (float): The box IoU cutoff used by non-maximal - suppression to filter duplicate masks. - crops_n_layers (int): If >0, mask prediction will be run again on - crops of the image. Sets the number of layers to run, where each - layer has 2**i_layer number of image crops. - crops_nms_thresh (float): The box IoU cutoff used by non-maximal - suppression to filter duplicate masks between different crops. - crop_overlap_ratio (float): Sets the degree to which crops overlap. - In the first crop layer, crops will overlap by this fraction of - the image length. Later layers with more crops scale down this overlap. - crop_n_points_downscale_factor (int): The number of points-per-side - sampled in layer n is scaled down by crop_n_points_downscale_factor**n. - point_grids (list(np.ndarray) or None): A list over explicit grids - of points used for sampling, normalized to [0,1]. The nth grid in the - list is used in the nth crop layer. Exclusive with points_per_side. - min_mask_region_area (int): If >0, postprocessing will be applied - to remove disconnected regions and holes in masks with area smaller - than min_mask_region_area. Requires opencv. - output_mode (str): The form masks are returned in. Can be 'binary_mask', - 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. - For large resolutions, 'binary_mask' may consume large amounts of - memory. - """ - - assert (points_per_side is None) != ( - point_grids is None - ), "Exactly one of points_per_side or point_grid must be provided." - if points_per_side is not None: - self.point_grids = build_all_layer_point_grids( - points_per_side, - crop_n_layers, - crop_n_points_downscale_factor, - ) - elif point_grids is not None: - self.point_grids = point_grids - else: - raise ValueError("Can't have both points_per_side and point_grid be None.") - - assert output_mode in [ - "binary_mask", - "uncompressed_rle", - "coco_rle", - ], f"Unknown output_mode {output_mode}." - if output_mode == "coco_rle": - from pycocotools import mask as mask_utils # type: ignore # noqa: F401 - - if min_mask_region_area > 0: - import cv2 # type: ignore # noqa: F401 - - self.predictor = SamPredictor(model) - self.points_per_batch = points_per_batch - self.pred_iou_thresh = pred_iou_thresh - self.stability_score_thresh = stability_score_thresh - self.stability_score_offset = stability_score_offset - self.box_nms_thresh = box_nms_thresh - self.crop_n_layers = crop_n_layers - self.crop_nms_thresh = crop_nms_thresh - self.crop_overlap_ratio = crop_overlap_ratio - self.crop_n_points_downscale_factor = crop_n_points_downscale_factor - self.min_mask_region_area = min_mask_region_area - self.output_mode = output_mode - - @torch.no_grad() - def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: - """ - Generates masks for the given image. - - Arguments: - image (np.ndarray): The image to generate masks for, in HWC uint8 format. - - Returns: - list(dict(str, any)): A list over records for masks. Each record is - a dict containing the following keys: - segmentation (dict(str, any) or np.ndarray): The mask. If - output_mode='binary_mask', is an array of shape HW. Otherwise, - is a dictionary containing the RLE. - bbox (list(float)): The box around the mask, in XYWH format. - area (int): The area in pixels of the mask. - predicted_iou (float): The model's own prediction of the mask's - quality. This is filtered by the pred_iou_thresh parameter. 
- point_coords (list(list(float))): The point coordinates input - to the model to generate this mask. - stability_score (float): A measure of the mask's quality. This - is filtered on using the stability_score_thresh parameter. - crop_box (list(float)): The crop of the image used to generate - the mask, given in XYWH format. - """ - - # Generate masks - mask_data = self._generate_masks(image) - - # Filter small disconnected regions and holes in masks - if self.min_mask_region_area > 0: - mask_data = self.postprocess_small_regions( - mask_data, - self.min_mask_region_area, - max(self.box_nms_thresh, self.crop_nms_thresh), - ) - - # Encode masks - if self.output_mode == "coco_rle": - mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] - elif self.output_mode == "binary_mask": - mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] - else: - mask_data["segmentations"] = mask_data["rles"] - - # Write mask records - curr_anns = [] - for idx in range(len(mask_data["segmentations"])): - ann = { - "segmentation": mask_data["segmentations"][idx], - "area": area_from_rle(mask_data["rles"][idx]), - "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), - "predicted_iou": mask_data["iou_preds"][idx].item(), - "point_coords": [mask_data["points"][idx].tolist()], - "stability_score": mask_data["stability_score"][idx].item(), - "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), - } - curr_anns.append(ann) - - return curr_anns - - def _generate_masks(self, image: np.ndarray) -> MaskData: - orig_size = image.shape[:2] - crop_boxes, layer_idxs = generate_crop_boxes( - orig_size, self.crop_n_layers, self.crop_overlap_ratio - ) - - # Iterate over image crops - data = MaskData() - for crop_box, layer_idx in zip(crop_boxes, layer_idxs): - crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) - data.cat(crop_data) - - # Remove duplicate masks between crops - if len(crop_boxes) > 1: - # Prefer masks from smaller crops - scores = 1 / box_area(data["crop_boxes"]) - scores = scores.to(data["boxes"].device) - keep_by_nms = batched_nms( - data["boxes"].float(), - scores, - torch.zeros(len(data["boxes"])), # categories - iou_threshold=self.crop_nms_thresh, - ) - data.filter(keep_by_nms) - - data.to_numpy() - return data - - def _process_crop( - self, - image: np.ndarray, - crop_box: List[int], - crop_layer_idx: int, - orig_size: Tuple[int, ...], - ) -> MaskData: - # Crop the image and calculate embeddings - x0, y0, x1, y1 = crop_box - cropped_im = image[y0:y1, x0:x1, :] - cropped_im_size = cropped_im.shape[:2] - self.predictor.set_image(cropped_im) - - # Get points for this crop - points_scale = np.array(cropped_im_size)[None, ::-1] - points_for_image = self.point_grids[crop_layer_idx] * points_scale - - # Generate masks for this crop in batches - data = MaskData() - for (points,) in batch_iterator(self.points_per_batch, points_for_image): - batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) - data.cat(batch_data) - del batch_data - self.predictor.reset_image() - - # Remove duplicates within this crop. 
- keep_by_nms = batched_nms( - data["boxes"].float(), - data["iou_preds"], - torch.zeros(len(data["boxes"])), # categories - iou_threshold=self.box_nms_thresh, - ) - data.filter(keep_by_nms) - - # Return to the original image frame - data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) - data["points"] = uncrop_points(data["points"], crop_box) - data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) - - return data - - def _process_batch( - self, - points: np.ndarray, - im_size: Tuple[int, ...], - crop_box: List[int], - orig_size: Tuple[int, ...], - ) -> MaskData: - orig_h, orig_w = orig_size - - # Run model on this batch - transformed_points = self.predictor.transform.apply_coords(points, im_size) - in_points = torch.as_tensor(transformed_points, device=self.predictor.device) - in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) - masks, iou_preds, _ = self.predictor.predict_torch( - in_points[:, None, :], - in_labels[:, None], - multimask_output=True, - return_logits=True, - ) - - # Serialize predictions and store in MaskData - data = MaskData( - masks=masks.flatten(0, 1), - iou_preds=iou_preds.flatten(0, 1), - points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), - ) - del masks - - # Filter by predicted IoU - if self.pred_iou_thresh > 0.0: - keep_mask = data["iou_preds"] > self.pred_iou_thresh - data.filter(keep_mask) - - # Calculate stability score - data["stability_score"] = calculate_stability_score( - data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset - ) - if self.stability_score_thresh > 0.0: - keep_mask = data["stability_score"] >= self.stability_score_thresh - data.filter(keep_mask) - - # Threshold masks and calculate boxes - data["masks"] = data["masks"] > self.predictor.model.mask_threshold - data["boxes"] = batched_mask_to_box(data["masks"]) - - # Filter boxes that touch crop boundaries - keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) - if not torch.all(keep_mask): - data.filter(keep_mask) - - # Compress to RLE - data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) - data["rles"] = mask_to_rle_pytorch(data["masks"]) - del data["masks"] - - return data - - @staticmethod - def postprocess_small_regions( - mask_data: MaskData, min_area: int, nms_thresh: float - ) -> MaskData: - """ - Removes small disconnected regions and holes in masks, then reruns - box NMS to remove any new duplicates. - - Edits mask_data in place. - - Requires open-cv as a dependency. 
- """ - if len(mask_data["rles"]) == 0: - return mask_data - - # Filter small disconnected regions and holes - new_masks = [] - scores = [] - for rle in mask_data["rles"]: - mask = rle_to_mask(rle) - - mask, changed = remove_small_regions(mask, min_area, mode="holes") - unchanged = not changed - mask, changed = remove_small_regions(mask, min_area, mode="islands") - unchanged = unchanged and not changed - - new_masks.append(torch.as_tensor(mask).unsqueeze(0)) - # Give score=0 to changed masks and score=1 to unchanged masks - # so NMS will prefer ones that didn't need postprocessing - scores.append(float(unchanged)) - - # Recalculate boxes and remove any new duplicates - masks = torch.cat(new_masks, dim=0) - boxes = batched_mask_to_box(masks) - keep_by_nms = batched_nms( - boxes.float(), - torch.as_tensor(scores), - torch.zeros(len(boxes)), # categories - iou_threshold=nms_thresh, - ) - - # Only recalculate RLEs for masks that have changed - for i_mask in keep_by_nms: - if scores[i_mask] == 0.0: - mask_torch = masks[i_mask].unsqueeze(0) - mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] - mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly - mask_data.filter(keep_by_nms) - - return mask_data diff --git a/spaces/MakiAi/Image2VideoProcessingPipelin/modules/Transition/SimpleVideoMerger.py b/spaces/MakiAi/Image2VideoProcessingPipelin/modules/Transition/SimpleVideoMerger.py deleted file mode 100644 index e3b9fc8185b3484cea9ef0e41ee7fc442c08b35c..0000000000000000000000000000000000000000 --- a/spaces/MakiAi/Image2VideoProcessingPipelin/modules/Transition/SimpleVideoMerger.py +++ /dev/null @@ -1,52 +0,0 @@ -from PIL import Image, ImageFilter -import random -import os -from pathlib import Path -import cv2 -import numpy as np - - -class SimpleVideoMerger: - def __init__(self, fps: int = 30): - self.fps = fps - - def merge_videos(self, input_folder: str, output_filename: str): - video_files = [f for f in Path(input_folder).glob("*.mp4")] - - if not video_files: - print("No video files found in the specified directory.") - return - - videos = [] - - for video_file in video_files: - video = cv2.VideoCapture(str(video_file)) - videos.append(video) - - width = int(videos[0].get(cv2.CAP_PROP_FRAME_WIDTH)) - height = int(videos[0].get(cv2.CAP_PROP_FRAME_HEIGHT)) - - fourcc = cv2.VideoWriter_fourcc(*'MP4V') - out = cv2.VideoWriter(output_filename, fourcc, self.fps, (width, height)) - - for i, video in enumerate(videos): - ret, frame = video.read() - - while ret: - out.write(frame) - ret, frame = video.read() - - video.release() - - out.release() - - print(f"Concatenated video saved to {output_filename}.") - -if __name__ == '__main__': - # 使用例 (コメントアウトされています) - merger = SimpleVideoMerger() - input_folder_path = r"image\Echoes-of-Creation_Blurred_mov" - output_folder_path = f"{input_folder_path}_Final" - os.makedirs(output_folder_path, exist_ok=True) - output_video_path = os.path.join(output_folder_path, "concatenated_video.mp4") - merger.merge_videos(input_folder_path, output_video_path) \ No newline at end of file diff --git a/spaces/Malolactica/amigosdejuegos/Dockerfile b/spaces/Malolactica/amigosdejuegos/Dockerfile deleted file mode 100644 index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/Malolactica/amigosdejuegos/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install 
-COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git a/spaces/MathysL/AutoGPT4/tests/unit/test_chat.py b/spaces/MathysL/AutoGPT4/tests/unit/test_chat.py deleted file mode 100644 index 774f4103762c28d5a02e89c14b224fae0bc0756a..0000000000000000000000000000000000000000 --- a/spaces/MathysL/AutoGPT4/tests/unit/test_chat.py +++ /dev/null @@ -1,86 +0,0 @@ -# Generated by CodiumAI -import time -import unittest -from unittest.mock import patch - -from autogpt.chat import create_chat_message, generate_context - - -class TestChat(unittest.TestCase): - # Tests that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content. - def test_happy_path_role_content(self): - result = create_chat_message("system", "Hello, world!") - self.assertEqual(result, {"role": "system", "content": "Hello, world!"}) - - # Tests that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content. - def test_empty_role_content(self): - result = create_chat_message("", "") - self.assertEqual(result, {"role": "", "content": ""}) - - # Tests the behavior of the generate_context function when all input parameters are empty. - @patch("time.strftime") - def test_generate_context_empty_inputs(self, mock_strftime): - # Mock the time.strftime function to return a fixed value - mock_strftime.return_value = "Sat Apr 15 00:00:00 2023" - # Arrange - prompt = "" - relevant_memory = "" - full_message_history = [] - model = "gpt-3.5-turbo-0301" - - # Act - result = generate_context(prompt, relevant_memory, full_message_history, model) - - # Assert - expected_result = ( - -1, - 47, - 3, - [ - {"role": "system", "content": ""}, - { - "role": "system", - "content": f"The current time and date is {time.strftime('%c')}", - }, - { - "role": "system", - "content": f"This reminds you of these events from your past:\n\n\n", - }, - ], - ) - self.assertEqual(result, expected_result) - - # Tests that the function successfully generates a current_context given valid inputs. - def test_generate_context_valid_inputs(self): - # Given - prompt = "What is your favorite color?" - relevant_memory = "You once painted your room blue." - full_message_history = [ - create_chat_message("user", "Hi there!"), - create_chat_message("assistant", "Hello! How can I assist you today?"), - create_chat_message("user", "Can you tell me a joke?"), - create_chat_message( - "assistant", - "Why did the tomato turn red? 
Because it saw the salad dressing!", - ), - create_chat_message("user", "Haha, that's funny."), - ] - model = "gpt-3.5-turbo-0301" - - # When - result = generate_context(prompt, relevant_memory, full_message_history, model) - - # Then - self.assertIsInstance(result[0], int) - self.assertIsInstance(result[1], int) - self.assertIsInstance(result[2], int) - self.assertIsInstance(result[3], list) - self.assertGreaterEqual(result[0], 0) - self.assertGreaterEqual(result[1], 0) - self.assertGreaterEqual(result[2], 0) - self.assertGreaterEqual( - len(result[3]), 3 - ) # current_context should have at least 3 messages - self.assertLessEqual( - result[1], 2048 - ) # token limit for GPT-3.5-turbo-0301 is 2048 tokens diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/points_in_boxes.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/points_in_boxes.py deleted file mode 100644 index 4003173a53052161dbcd687a2fa1d755642fdab8..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/points_in_boxes.py +++ /dev/null @@ -1,133 +0,0 @@ -import torch - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'points_in_boxes_part_forward', 'points_in_boxes_cpu_forward', - 'points_in_boxes_all_forward' -]) - - -def points_in_boxes_part(points, boxes): - """Find the box in which each point is (CUDA). - - Args: - points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate - boxes (torch.Tensor): [B, T, 7], - num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in - LiDAR/DEPTH coordinate, (x, y, z) is the bottom center - - Returns: - box_idxs_of_pts (torch.Tensor): (B, M), default background = -1 - """ - assert points.shape[0] == boxes.shape[0], \ - 'Points and boxes should have the same batch size, ' \ - f'but got {points.shape[0]} and {boxes.shape[0]}' - assert boxes.shape[2] == 7, \ - 'boxes dimension should be 7, ' \ - f'but got unexpected shape {boxes.shape[2]}' - assert points.shape[2] == 3, \ - 'points dimension should be 3, ' \ - f'but got unexpected shape {points.shape[2]}' - batch_size, num_points, _ = points.shape - - box_idxs_of_pts = points.new_zeros((batch_size, num_points), - dtype=torch.int).fill_(-1) - - # If manually put the tensor 'points' or 'boxes' on a device - # which is not the current device, some temporary variables - # will be created on the current device in the cuda op, - # and the output will be incorrect. - # Therefore, we force the current device to be the same - # as the device of the tensors if it was not. - # Please refer to https://github.com/open-mmlab/mmdetection3d/issues/305 - # for the incorrect output before the fix. - points_device = points.get_device() - assert points_device == boxes.get_device(), \ - 'Points and boxes should be put on the same device' - if torch.cuda.current_device() != points_device: - torch.cuda.set_device(points_device) - - ext_module.points_in_boxes_part_forward(boxes.contiguous(), - points.contiguous(), - box_idxs_of_pts) - - return box_idxs_of_pts - - -def points_in_boxes_cpu(points, boxes): - """Find all boxes in which each point is (CPU). The CPU version of - :meth:`points_in_boxes_all`. - - Args: - points (torch.Tensor): [B, M, 3], [x, y, z] in - LiDAR/DEPTH coordinate - boxes (torch.Tensor): [B, T, 7], - num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz], - (x, y, z) is the bottom center. - - Returns: - box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0. 
- """ - assert points.shape[0] == boxes.shape[0], \ - 'Points and boxes should have the same batch size, ' \ - f'but got {points.shape[0]} and {boxes.shape[0]}' - assert boxes.shape[2] == 7, \ - 'boxes dimension should be 7, ' \ - f'but got unexpected shape {boxes.shape[2]}' - assert points.shape[2] == 3, \ - 'points dimension should be 3, ' \ - f'but got unexpected shape {points.shape[2]}' - batch_size, num_points, _ = points.shape - num_boxes = boxes.shape[1] - - point_indices = points.new_zeros((batch_size, num_boxes, num_points), - dtype=torch.int) - for b in range(batch_size): - ext_module.points_in_boxes_cpu_forward(boxes[b].float().contiguous(), - points[b].float().contiguous(), - point_indices[b]) - point_indices = point_indices.transpose(1, 2) - - return point_indices - - -def points_in_boxes_all(points, boxes): - """Find all boxes in which each point is (CUDA). - - Args: - points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate - boxes (torch.Tensor): [B, T, 7], - num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz], - (x, y, z) is the bottom center. - - Returns: - box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0. - """ - assert boxes.shape[0] == points.shape[0], \ - 'Points and boxes should have the same batch size, ' \ - f'but got {boxes.shape[0]} and {boxes.shape[0]}' - assert boxes.shape[2] == 7, \ - 'boxes dimension should be 7, ' \ - f'but got unexpected shape {boxes.shape[2]}' - assert points.shape[2] == 3, \ - 'points dimension should be 3, ' \ - f'but got unexpected shape {points.shape[2]}' - batch_size, num_points, _ = points.shape - num_boxes = boxes.shape[1] - - box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes), - dtype=torch.int).fill_(0) - - # Same reason as line 25-32 - points_device = points.get_device() - assert points_device == boxes.get_device(), \ - 'Points and boxes should be put on the same device' - if torch.cuda.current_device() != points_device: - torch.cuda.set_device(points_device) - - ext_module.points_in_boxes_all_forward(boxes.contiguous(), - points.contiguous(), - box_idxs_of_pts) - - return box_idxs_of_pts diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/pipelines/formating.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/pipelines/formating.py deleted file mode 100644 index 97db85f4f9db39fb86ba77ead7d1a8407d810adb..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/pipelines/formating.py +++ /dev/null @@ -1,288 +0,0 @@ -from collections.abc import Sequence - -import annotator.uniformer.mmcv as mmcv -import numpy as np -import torch -from annotator.uniformer.mmcv.parallel import DataContainer as DC - -from ..builder import PIPELINES - - -def to_tensor(data): - """Convert objects of various python types to :obj:`torch.Tensor`. - - Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, - :class:`Sequence`, :class:`int` and :class:`float`. - - Args: - data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to - be converted. 
- """ - - if isinstance(data, torch.Tensor): - return data - elif isinstance(data, np.ndarray): - return torch.from_numpy(data) - elif isinstance(data, Sequence) and not mmcv.is_str(data): - return torch.tensor(data) - elif isinstance(data, int): - return torch.LongTensor([data]) - elif isinstance(data, float): - return torch.FloatTensor([data]) - else: - raise TypeError(f'type {type(data)} cannot be converted to tensor.') - - -@PIPELINES.register_module() -class ToTensor(object): - """Convert some results to :obj:`torch.Tensor` by given keys. - - Args: - keys (Sequence[str]): Keys that need to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert data in results to :obj:`torch.Tensor`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted - to :obj:`torch.Tensor`. - """ - - for key in self.keys: - results[key] = to_tensor(results[key]) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class ImageToTensor(object): - """Convert image to :obj:`torch.Tensor` by given keys. - - The dimension order of input image is (H, W, C). The pipeline will convert - it to (C, H, W). If only 2 dimension (H, W) is given, the output would be - (1, H, W). - - Args: - keys (Sequence[str]): Key of images to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. - """ - - for key in self.keys: - img = results[key] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - results[key] = to_tensor(img.transpose(2, 0, 1)) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class Transpose(object): - """Transpose some results by given keys. - - Args: - keys (Sequence[str]): Keys of results to be transposed. - order (Sequence[int]): Order of transpose. - """ - - def __init__(self, keys, order): - self.keys = keys - self.order = order - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. - """ - - for key in self.keys: - results[key] = results[key].transpose(self.order) - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, order={self.order})' - - -@PIPELINES.register_module() -class ToDataContainer(object): - """Convert results to :obj:`mmcv.DataContainer` by given fields. - - Args: - fields (Sequence[dict]): Each field is a dict like - ``dict(key='xxx', **kwargs)``. The ``key`` in result will - be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. - Default: ``(dict(key='img', stack=True), - dict(key='gt_semantic_seg'))``. 
- """ - - def __init__(self, - fields=(dict(key='img', - stack=True), dict(key='gt_semantic_seg'))): - self.fields = fields - - def __call__(self, results): - """Call function to convert data in results to - :obj:`mmcv.DataContainer`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted to - :obj:`mmcv.DataContainer`. - """ - - for field in self.fields: - field = field.copy() - key = field.pop('key') - results[key] = DC(results[key], **field) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(fields={self.fields})' - - -@PIPELINES.register_module() -class DefaultFormatBundle(object): - """Default formatting bundle. - - It simplifies the pipeline of formatting common fields, including "img" - and "gt_semantic_seg". These fields are formatted as follows. - - - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, - (3)to DataContainer (stack=True) - """ - - def __call__(self, results): - """Call function to transform and format common fields in results. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data that is formatted with - default bundle. - """ - - if 'img' in results: - img = results['img'] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - img = np.ascontiguousarray(img.transpose(2, 0, 1)) - results['img'] = DC(to_tensor(img), stack=True) - if 'gt_semantic_seg' in results: - # convert to long - results['gt_semantic_seg'] = DC( - to_tensor(results['gt_semantic_seg'][None, - ...].astype(np.int64)), - stack=True) - return results - - def __repr__(self): - return self.__class__.__name__ - - -@PIPELINES.register_module() -class Collect(object): - """Collect data from the loader relevant to the specific task. - - This is usually the last stage of the data loader pipeline. Typically keys - is set to some subset of "img", "gt_semantic_seg". - - The "img_meta" item is always populated. The contents of the "img_meta" - dictionary depends on "meta_keys". By default this includes: - - - "img_shape": shape of the image input to the network as a tuple - (h, w, c). Note that images may be zero padded on the bottom/right - if the batch tensor is larger than this shape. - - - "scale_factor": a float indicating the preprocessing scale - - - "flip": a boolean indicating if image flip transform was used - - - "filename": path to the image file - - - "ori_shape": original shape of the image as a tuple (h, w, c) - - - "pad_shape": image shape after padding - - - "img_norm_cfg": a dict of normalization information: - - mean - per channel mean subtraction - - std - per channel std divisor - - to_rgb - bool indicating if bgr was converted to rgb - - Args: - keys (Sequence[str]): Keys of results to be collected in ``data``. - meta_keys (Sequence[str], optional): Meta keys to be converted to - ``mmcv.DataContainer`` and collected in ``data[img_metas]``. - Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', - 'pad_shape', 'scale_factor', 'flip', 'flip_direction', - 'img_norm_cfg')`` - """ - - def __init__(self, - keys, - meta_keys=('filename', 'ori_filename', 'ori_shape', - 'img_shape', 'pad_shape', 'scale_factor', 'flip', - 'flip_direction', 'img_norm_cfg')): - self.keys = keys - self.meta_keys = meta_keys - - def __call__(self, results): - """Call function to collect keys in results. 
The keys in ``meta_keys`` - will be converted to :obj:mmcv.DataContainer. - - Args: - results (dict): Result dict contains the data to collect. - - Returns: - dict: The result dict contains the following keys - - keys in``self.keys`` - - ``img_metas`` - """ - - data = {} - img_meta = {} - for key in self.meta_keys: - img_meta[key] = results[key] - data['img_metas'] = DC(img_meta, cpu_only=True) - for key in self.keys: - data[key] = results[key] - return data - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, meta_keys={self.meta_keys})' diff --git a/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textdet/kaist_converter.py b/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textdet/kaist_converter.py deleted file mode 100644 index 3f95804d1dda27a88db247e177c3f7522361faf5..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/tools/dataset_converters/textdet/kaist_converter.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import argparse -import math -import os -import os.path as osp -import xml.etree.ElementTree as ET - -import mmcv -import mmengine - -from mmocr.utils import dump_ocr_data - - -def collect_files(img_dir, gt_dir, ratio): - """Collect all images and their corresponding groundtruth files. - - Args: - img_dir (str): The image directory - gt_dir (str): The groundtruth directory - ratio (float): Split ratio for val set - - Returns: - files (list): The list of tuples (img_file, groundtruth_file) - """ - assert isinstance(img_dir, str) - assert img_dir - assert isinstance(gt_dir, str) - assert gt_dir - assert isinstance(ratio, float) - assert ratio < 1.0, 'val_ratio should be a float between 0.0 to 1.0' - - ann_list, imgs_list = [], [] - for img_file in os.listdir(img_dir): - ann_list.append(osp.join(gt_dir, img_file.split('.')[0] + '.xml')) - imgs_list.append(osp.join(img_dir, img_file)) - - all_files = list(zip(sorted(imgs_list), sorted(ann_list))) - assert len(all_files), f'No images found in {img_dir}' - print(f'Loaded {len(all_files)} images from {img_dir}') - - trn_files, val_files = [], [] - if ratio > 0: - for i, file in enumerate(all_files): - if i % math.floor(1 / ratio): - trn_files.append(file) - else: - val_files.append(file) - else: - trn_files, val_files = all_files, [] - - print(f'training #{len(trn_files)}, val #{len(val_files)}') - - return trn_files, val_files - - -def collect_annotations(files, nproc=1): - """Collect the annotation information. - - Args: - files (list): The list of tuples (image_file, groundtruth_file) - nproc (int): The number of process to collect annotations - - Returns: - images (list): The list of image information dicts - """ - assert isinstance(files, list) - assert isinstance(nproc, int) - - if nproc > 1: - images = mmengine.track_parallel_progress( - load_img_info, files, nproc=nproc) - else: - images = mmengine.track_progress(load_img_info, files) - - return images - - -def load_img_info(files): - """Load the information of one image. 
- - Args: - files (tuple): The tuple of (img_file, groundtruth_file) - - Returns: - img_info (dict): The dict of the img and annotation information - """ - assert isinstance(files, tuple) - - img_file, gt_file = files - assert osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split( - '.')[0] - # read imgs while ignoring orientations - img = mmcv.imread(img_file, 'unchanged') - - img_info = dict( - file_name=osp.join(osp.basename(img_file)), - height=img.shape[0], - width=img.shape[1], - segm_file=osp.join(osp.basename(gt_file))) - - if osp.splitext(gt_file)[1] == '.xml': - img_info = load_xml_info(gt_file, img_info) - else: - raise NotImplementedError - - return img_info - - -def load_xml_info(gt_file, img_info): - """Collect the annotation information. - - Annotation Format - - DSC02306.JPG - - - - - - - - - - - - - no - 2 - - - - - Args: - gt_file (str): The path to ground-truth - img_info (dict): The dict of the img and annotation information - - Returns: - img_info (dict): The dict of the img and annotation information - """ - - obj = ET.parse(gt_file) - root = obj.getroot() - anno_info = [] - for word in root.iter('word'): - x, y = max(0, int(word.attrib['x'])), max(0, int(word.attrib['y'])) - w, h = int(word.attrib['width']), int(word.attrib['height']) - bbox = [x, y, w, h] - segmentation = [x, y, x + w, y, x + w, y + h, x, y + h] - - anno = dict( - iscrowd=0, - category_id=1, - bbox=bbox, - area=w * h, - segmentation=[segmentation]) - anno_info.append(anno) - - img_info.update(anno_info=anno_info) - - return img_info - - -def parse_args(): - parser = argparse.ArgumentParser( - description='Generate training and val set of KAIST ') - parser.add_argument('root_path', help='Root dir path of KAIST') - parser.add_argument( - '--val-ratio', help='Split ratio for val set', default=0.0, type=float) - parser.add_argument( - '--nproc', default=1, type=int, help='Number of process') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - root_path = args.root_path - ratio = args.val_ratio - - trn_files, val_files = collect_files( - osp.join(root_path, 'imgs'), osp.join(root_path, 'annotations'), ratio) - - # Train set - trn_infos = collect_annotations(trn_files, nproc=args.nproc) - with mmengine.Timer( - print_tmpl='It takes {}s to convert KAIST Training annotation'): - dump_ocr_data(trn_infos, osp.join(root_path, - 'instances_training.json'), - 'textdet') - - # Val set - if len(val_files) > 0: - val_infos = collect_annotations(val_files, nproc=args.nproc) - with mmengine.Timer( - print_tmpl='It takes {}s to convert KAIST Val annotation'): - dump_ocr_data(val_infos, osp.join(root_path, 'instances_val.json'), - 'textdet') - - -if __name__ == '__main__': - main() diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/multi_channel_attention_test.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/multi_channel_attention_test.py deleted file mode 100644 index ab6e0e7fec48635d09e6e30c3ad247044ae9785f..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/multi_channel_attention_test.py +++ /dev/null @@ -1,56 +0,0 @@ -# Lint as: python3 -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for nlp.nhnet.multi_channel_attention.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import tensorflow as tf - -from official.nlp.modeling.layers import multi_channel_attention - - -class MultiChannelAttentionTest(tf.test.TestCase): - - def test_doc_attention(self): - num_heads = 2 - doc_attention = multi_channel_attention.VotingAttention( - num_heads, head_size=8) - num_docs = 3 - inputs = np.zeros((2, num_docs, 10, 16), dtype=np.float32) - doc_mask = np.zeros((2, num_docs), dtype=np.float32) - outputs = doc_attention(inputs, doc_mask) - self.assertEqual(outputs.shape, (2, num_docs)) - - def test_multi_channel_attention(self): - num_heads = 2 - num_docs = 5 - attention_layer = multi_channel_attention.MultiChannelAttention( - num_heads, key_size=2) - - from_data = 10 * np.random.random_sample((3, 4, 8)) - to_data = 10 * np.random.random_sample((3, num_docs, 2, 8)) - mask_data = np.random.randint(2, size=(3, num_docs, 4, 2)) - doc_probs = np.random.randint( - 2, size=(3, num_heads, 4, num_docs)).astype(float) - outputs = attention_layer([from_data, to_data, doc_probs], mask_data) - self.assertEqual(outputs.shape, (3, 4, 8)) - - -if __name__ == "__main__": - tf.test.main() diff --git a/spaces/NCTCMumbai/NCTC/models/official/recommendation/stat_utils.py b/spaces/NCTCMumbai/NCTC/models/official/recommendation/stat_utils.py deleted file mode 100644 index 658a2721e98a88d71dc2ac4562366283ffd2fc47..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/recommendation/stat_utils.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2018 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Statistics utility functions of NCF.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import numpy as np - - -def random_int32(): - return np.random.randint(low=0, high=np.iinfo(np.int32).max, dtype=np.int32) - - -def permutation(args): - """Fork safe permutation function. - - This function can be called within a multiprocessing worker and give - appropriately random results. - - Args: - args: A size two tuple that will unpacked into the size of the permutation - and the random seed. This form is used because starmap is not universally - available. 
- - returns: - A NumPy array containing a random permutation. - """ - x, seed = args - - # If seed is None NumPy will seed randomly. - state = np.random.RandomState(seed=seed) # pylint: disable=no-member - output = np.arange(x, dtype=np.int32) - state.shuffle(output) - return output - - -def very_slightly_biased_randint(max_val_vector): - sample_dtype = np.uint64 - out_dtype = max_val_vector.dtype - samples = np.random.randint(low=0, high=np.iinfo(sample_dtype).max, - size=max_val_vector.shape, dtype=sample_dtype) - return np.mod(samples, max_val_vector.astype(sample_dtype)).astype(out_dtype) - - -def mask_duplicates(x, axis=1): # type: (np.ndarray, int) -> np.ndarray - """Identify duplicates from sampling with replacement. - - Args: - x: A 2D NumPy array of samples - axis: The axis along which to de-dupe. - - Returns: - A NumPy array with the same shape as x with one if an element appeared - previously along axis 1, else zero. - """ - if axis != 1: - raise NotImplementedError - - x_sort_ind = np.argsort(x, axis=1, kind="mergesort") - sorted_x = x[np.arange(x.shape[0])[:, np.newaxis], x_sort_ind] - - # compute the indices needed to map values back to their original position. - inv_x_sort_ind = np.argsort(x_sort_ind, axis=1, kind="mergesort") - - # Compute the difference of adjacent sorted elements. - diffs = sorted_x[:, :-1] - sorted_x[:, 1:] - - # We are only interested in whether an element is zero. Therefore left padding - # with ones to restore the original shape is sufficient. - diffs = np.concatenate( - [np.ones((diffs.shape[0], 1), dtype=diffs.dtype), diffs], axis=1) - - # Duplicate values will have a difference of zero. By definition the first - # element is never a duplicate. - return np.where(diffs[np.arange(x.shape[0])[:, np.newaxis], - inv_x_sort_ind], 0, 1) diff --git a/spaces/NCTCMumbai/NCTC/models/research/autoencoder/MaskingNoiseAutoencoderRunner.py b/spaces/NCTCMumbai/NCTC/models/research/autoencoder/MaskingNoiseAutoencoderRunner.py deleted file mode 100644 index b776302e286ff740ba7b8e6f679a54b23944df12..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/autoencoder/MaskingNoiseAutoencoderRunner.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import sklearn.preprocessing as prep -import tensorflow as tf -from tensorflow.examples.tutorials.mnist import input_data - -from autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder - -mnist = input_data.read_data_sets('MNIST_data', one_hot=True) - - -def standard_scale(X_train, X_test): - preprocessor = prep.StandardScaler().fit(X_train) - X_train = preprocessor.transform(X_train) - X_test = preprocessor.transform(X_test) - return X_train, X_test - - -def get_random_block_from_data(data, batch_size): - start_index = np.random.randint(0, len(data) - batch_size) - return data[start_index:(start_index + batch_size)] - - -X_train, X_test = standard_scale(mnist.train.images, mnist.test.images) - -n_samples = int(mnist.train.num_examples) -training_epochs = 100 -batch_size = 128 -display_step = 1 - -autoencoder = MaskingNoiseAutoencoder( - n_input=784, - n_hidden=200, - transfer_function=tf.nn.softplus, - optimizer=tf.train.AdamOptimizer(learning_rate=0.001), - dropout_probability=0.95) - -for epoch in range(training_epochs): - avg_cost = 0. 
- total_batch = int(n_samples / batch_size) - for i in range(total_batch): - batch_xs = get_random_block_from_data(X_train, batch_size) - - cost = autoencoder.partial_fit(batch_xs) - - avg_cost += cost / n_samples * batch_size - - if epoch % display_step == 0: - print("Epoch:", '%d,' % (epoch + 1), - "Cost:", "{:.9f}".format(avg_cost)) - -print("Total cost: " + str(autoencoder.calc_total_cost(X_test))) diff --git a/spaces/NingKanae/anime-voice-generator/transforms.py b/spaces/NingKanae/anime-voice-generator/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/NingKanae/anime-voice-generator/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - 
unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * 
theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/roberta/README.custom_classification.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/roberta/README.custom_classification.md deleted file mode 100644 index 7254bb7d178760ef5b847901bbcac3711af33ca2..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/roberta/README.custom_classification.md +++ /dev/null @@ -1,168 +0,0 @@ -# Finetuning RoBERTa on a custom classification task - -This example shows how to finetune RoBERTa on the IMDB dataset, but should illustrate the process for most classification tasks. - -### 1) Get the data - -```bash -wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz -tar zxvf aclImdb_v1.tar.gz -``` - - -### 2) Format data - -`IMDB` data has one data-sample in each file, below python code-snippet converts it one file for train and valid each for ease of processing. -```python -import argparse -import os -import random -from glob import glob - -random.seed(0) - -def main(args): - for split in ['train', 'test']: - samples = [] - for class_label in ['pos', 'neg']: - fnames = glob(os.path.join(args.datadir, split, class_label) + '/*.txt') - for fname in fnames: - with open(fname) as fin: - line = fin.readline() - samples.append((line, 1 if class_label == 'pos' else 0)) - random.shuffle(samples) - out_fname = 'train' if split == 'train' else 'dev' - f1 = open(os.path.join(args.datadir, out_fname + '.input0'), 'w') - f2 = open(os.path.join(args.datadir, out_fname + '.label'), 'w') - for sample in samples: - f1.write(sample[0] + '\n') - f2.write(str(sample[1]) + '\n') - f1.close() - f2.close() - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--datadir', default='aclImdb') - args = parser.parse_args() - main(args) -``` - - -### 3) BPE encode - -Run `multiprocessing_bpe_encoder`, you can also do this in previous step for each sample but that might be slower. -```bash -# Download encoder.json and vocab.bpe -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' - -for SPLIT in train dev; do - python -m examples.roberta.multiprocessing_bpe_encoder \ - --encoder-json encoder.json \ - --vocab-bpe vocab.bpe \ - --inputs "aclImdb/$SPLIT.input0" \ - --outputs "aclImdb/$SPLIT.input0.bpe" \ - --workers 60 \ - --keep-empty -done -``` - - -### 4) Preprocess data - -```bash -# Download fairseq dictionary. -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt' - -fairseq-preprocess \ - --only-source \ - --trainpref "aclImdb/train.input0.bpe" \ - --validpref "aclImdb/dev.input0.bpe" \ - --destdir "IMDB-bin/input0" \ - --workers 60 \ - --srcdict dict.txt - -fairseq-preprocess \ - --only-source \ - --trainpref "aclImdb/train.label" \ - --validpref "aclImdb/dev.label" \ - --destdir "IMDB-bin/label" \ - --workers 60 - -``` - - -### 5) Run training - -```bash -TOTAL_NUM_UPDATES=7812 # 10 epochs through IMDB for bsz 32 -WARMUP_UPDATES=469 # 6 percent of the number of updates -LR=1e-05 # Peak LR for polynomial LR scheduler. -HEAD_NAME=imdb_head # Custom name for the classification head. -NUM_CLASSES=2 # Number of classes for the classification task. -MAX_SENTENCES=8 # Batch size. 
-ROBERTA_PATH=/path/to/roberta.large/model.pt - -CUDA_VISIBLE_DEVICES=0 fairseq-train IMDB-bin/ \ - --restore-file $ROBERTA_PATH \ - --max-positions 512 \ - --batch-size $MAX_SENTENCES \ - --max-tokens 4400 \ - --task sentence_prediction \ - --reset-optimizer --reset-dataloader --reset-meters \ - --required-batch-size-multiple 1 \ - --init-token 0 --separator-token 2 \ - --arch roberta_large \ - --criterion sentence_prediction \ - --classification-head-name $HEAD_NAME \ - --num-classes $NUM_CLASSES \ - --dropout 0.1 --attention-dropout 0.1 \ - --weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \ - --clip-norm 0.0 \ - --lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \ - --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \ - --max-epoch 10 \ - --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \ - --shorten-method "truncate" \ - --find-unused-parameters \ - --update-freq 4 -``` - -The above command will finetune RoBERTa-large with an effective batch-size of 32 -sentences (`--batch-size=8 --update-freq=4`). The expected -`best-validation-accuracy` after 10 epochs is ~96.5%. - -If you run out of GPU memory, try decreasing `--batch-size` and increase -`--update-freq` to compensate. - - -### 6) Load model using hub interface - -Now we can load the trained model checkpoint using the RoBERTa hub interface. - -Assuming your checkpoints are stored in `checkpoints/`: -```python -from fairseq.models.roberta import RobertaModel -roberta = RobertaModel.from_pretrained( - 'checkpoints', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='IMDB-bin' -) -roberta.eval() # disable dropout -``` - -Finally you can make predictions using the `imdb_head` (or whatever you set -`--classification-head-name` to during training): -```python -label_fn = lambda label: roberta.task.label_dictionary.string( - [label + roberta.task.label_dictionary.nspecial] -) - -tokens = roberta.encode('Best movie this year') -pred = label_fn(roberta.predict('imdb_head', tokens).argmax().item()) -assert pred == '1' # positive - -tokens = roberta.encode('Worst movie ever') -pred = label_fn(roberta.predict('imdb_head', tokens).argmax().item()) -assert pred == '0' # negative -``` diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/registry.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/registry.py deleted file mode 100644 index f3b9406043d75a51d7bf4af5294f82b33a8f9a5e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/registry.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
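The `registry.py` module that follows implements fairseq's generic plug-in registry. As a rough sketch of how a registry created by `setup_registry` is typically consumed — the `--tokenizer` registry name and the `WhitespaceTokenizer` class below are illustrative assumptions for the example, not taken from this file:

```python
# Illustrative sketch only: consuming a registry built by fairseq.registry.setup_registry.
# The "--tokenizer" flag and WhitespaceTokenizer class are assumptions for this example.
from fairseq import registry

# setup_registry returns (build_x, register_x, REGISTRY, DATACLASS_REGISTRY).
build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry(
    "--tokenizer",  # must start with "--"; stored internally as "tokenizer"
    default=None,
)


@register_tokenizer("whitespace")
class WhitespaceTokenizer:
    def __init__(self, cfg):
        # when build_x is called with a plain string choice, that string is passed as cfg
        self.cfg = cfg

    def encode(self, line: str):
        return line.split()


# build_x() resolves the choice name to the registered class and instantiates it.
tokenizer = build_tokenizer("whitespace")
print(type(tokenizer).__name__)  # WhitespaceTokenizer
```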
- -from argparse import Namespace - -from typing import Union -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.utils import merge_with_parent -from hydra.core.config_store import ConfigStore -from omegaconf import DictConfig - -REGISTRIES = {} - - -def setup_registry(registry_name: str, base_class=None, default=None, required=False): - assert registry_name.startswith("--") - registry_name = registry_name[2:].replace("-", "_") - - REGISTRY = {} - REGISTRY_CLASS_NAMES = set() - DATACLASS_REGISTRY = {} - - # maintain a registry of all registries - if registry_name in REGISTRIES: - return # registry already exists - REGISTRIES[registry_name] = { - "registry": REGISTRY, - "default": default, - "dataclass_registry": DATACLASS_REGISTRY, - } - - def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs): - if isinstance(cfg, DictConfig): - choice = cfg._name - - if choice and choice in DATACLASS_REGISTRY: - dc = DATACLASS_REGISTRY[choice] - cfg = merge_with_parent(dc(), cfg) - elif isinstance(cfg, str): - choice = cfg - if choice in DATACLASS_REGISTRY: - cfg = DATACLASS_REGISTRY[choice]() - else: - choice = getattr(cfg, registry_name, None) - if choice in DATACLASS_REGISTRY: - cfg = DATACLASS_REGISTRY[choice].from_namespace(cfg) - - if choice is None: - if required: - raise ValueError("{} is required!".format(registry_name)) - return None - - cls = REGISTRY[choice] - if hasattr(cls, "build_" + registry_name): - builder = getattr(cls, "build_" + registry_name) - else: - builder = cls - - return builder(cfg, *extra_args, **extra_kwargs) - - def register_x(name, dataclass=None): - def register_x_cls(cls): - if name in REGISTRY: - raise ValueError( - "Cannot register duplicate {} ({})".format(registry_name, name) - ) - if cls.__name__ in REGISTRY_CLASS_NAMES: - raise ValueError( - "Cannot register {} with duplicate class name ({})".format( - registry_name, cls.__name__ - ) - ) - if base_class is not None and not issubclass(cls, base_class): - raise ValueError( - "{} must extend {}".format(cls.__name__, base_class.__name__) - ) - - if dataclass is not None and not issubclass(dataclass, FairseqDataclass): - raise ValueError( - "Dataclass {} must extend FairseqDataclass".format(dataclass) - ) - - cls.__dataclass = dataclass - if cls.__dataclass is not None: - DATACLASS_REGISTRY[name] = cls.__dataclass - - cs = ConfigStore.instance() - node = dataclass() - node._name = name - cs.store(name=name, group=registry_name, node=node, provider="fairseq") - - REGISTRY[name] = cls - - return cls - - return register_x_cls - - return build_x, register_x, REGISTRY, DATACLASS_REGISTRY diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/speech_to_text/s2t_transformer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/speech_to_text/s2t_transformer.py deleted file mode 100644 index aff9d0ffc7b7e671c476ff28d1cd945e9ff41519..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/speech_to_text/s2t_transformer.py +++ /dev/null @@ -1,502 +0,0 @@ -#!/usr/bin/env python3 - -import logging -import math -from typing import Dict, List, Optional, Tuple -from pathlib import Path - -import torch -import torch.nn as nn -from fairseq import checkpoint_utils, utils -from fairseq.data.data_utils import lengths_to_padding_mask -from fairseq.models import ( - FairseqEncoder, - FairseqEncoderDecoderModel, - register_model, - register_model_architecture, -) -from fairseq.models.transformer import Embedding, TransformerDecoder -from fairseq.modules 
import ( - FairseqDropout, - LayerNorm, - PositionalEmbedding, - TransformerEncoderLayer, -) -from torch import Tensor - - -logger = logging.getLogger(__name__) - - -class Conv1dSubsampler(nn.Module): - """Convolutional subsampler: a stack of 1D convolution (along temporal - dimension) followed by non-linear activation via gated linear units - (https://arxiv.org/abs/1911.08460) - - Args: - in_channels (int): the number of input channels - mid_channels (int): the number of intermediate channels - out_channels (int): the number of output channels - kernel_sizes (List[int]): the kernel size for each convolutional layer - """ - - def __init__( - self, - in_channels: int, - mid_channels: int, - out_channels: int, - kernel_sizes: List[int] = (3, 3), - ): - super(Conv1dSubsampler, self).__init__() - self.n_layers = len(kernel_sizes) - self.conv_layers = nn.ModuleList( - nn.Conv1d( - in_channels if i == 0 else mid_channels // 2, - mid_channels if i < self.n_layers - 1 else out_channels * 2, - k, - stride=2, - padding=k // 2, - ) - for i, k in enumerate(kernel_sizes) - ) - - def get_out_seq_lens_tensor(self, in_seq_lens_tensor): - out = in_seq_lens_tensor.clone() - for _ in range(self.n_layers): - out = ((out.float() - 1) / 2 + 1).floor().long() - return out - - def forward(self, src_tokens, src_lengths): - bsz, in_seq_len, _ = src_tokens.size() # B x T x (C x D) - x = src_tokens.transpose(1, 2).contiguous() # -> B x (C x D) x T - for conv in self.conv_layers: - x = conv(x) - x = nn.functional.glu(x, dim=1) - _, _, out_seq_len = x.size() - x = x.transpose(1, 2).transpose(0, 1).contiguous() # -> T x B x (C x D) - return x, self.get_out_seq_lens_tensor(src_lengths) - - -@register_model("s2t_transformer") -class S2TTransformerModel(FairseqEncoderDecoderModel): - """Adapted Transformer model (https://arxiv.org/abs/1706.03762) for - speech-to-text tasks. The Transformer encoder/decoder remains the same. 
- A trainable input subsampler is prepended to the Transformer encoder to - project inputs into the encoder dimension as well as downsample input - sequence for computational efficiency.""" - - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - # input - parser.add_argument( - "--conv-kernel-sizes", - type=str, - metavar="N", - help="kernel sizes of Conv1d subsampling layers", - ) - parser.add_argument( - "--conv-channels", - type=int, - metavar="N", - help="# of channels in Conv1d subsampling layers", - ) - # Transformer - parser.add_argument( - "--activation-fn", - type=str, - default="relu", - choices=utils.get_available_activation_fns(), - help="activation function to use", - ) - parser.add_argument( - "--dropout", type=float, metavar="D", help="dropout probability" - ) - parser.add_argument( - "--attention-dropout", - type=float, - metavar="D", - help="dropout probability for attention weights", - ) - parser.add_argument( - "--activation-dropout", - "--relu-dropout", - type=float, - metavar="D", - help="dropout probability after activation in FFN.", - ) - parser.add_argument( - "--encoder-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension", - ) - parser.add_argument( - "--encoder-ffn-embed-dim", - type=int, - metavar="N", - help="encoder embedding dimension for FFN", - ) - parser.add_argument( - "--encoder-layers", type=int, metavar="N", help="num encoder layers" - ) - parser.add_argument( - "--encoder-attention-heads", - type=int, - metavar="N", - help="num encoder attention heads", - ) - parser.add_argument( - "--encoder-normalize-before", - action="store_true", - help="apply layernorm before each encoder block", - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension", - ) - parser.add_argument( - "--decoder-ffn-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension for FFN", - ) - parser.add_argument( - "--decoder-layers", type=int, metavar="N", help="num decoder layers" - ) - parser.add_argument( - "--decoder-attention-heads", - type=int, - metavar="N", - help="num decoder attention heads", - ) - parser.add_argument( - "--decoder-normalize-before", - action="store_true", - help="apply layernorm before each decoder block", - ) - parser.add_argument( - "--share-decoder-input-output-embed", - action="store_true", - help="share decoder input and output embeddings", - ) - parser.add_argument( - "--layernorm-embedding", - action="store_true", - help="add layernorm to embedding", - ) - parser.add_argument( - "--no-scale-embedding", - action="store_true", - help="if True, dont scale embeddings", - ) - parser.add_argument( - "--load-pretrained-encoder-from", - type=str, - metavar="STR", - help="model to take encoder weights from (for initialization)", - ) - parser.add_argument( - '--encoder-freezing-updates', - type=int, - metavar='N', - help='freeze encoder for first N updates' - ) - - @classmethod - def build_encoder(cls, args): - encoder = S2TTransformerEncoder(args) - pretraining_path = getattr(args, "load_pretrained_encoder_from", None) - if pretraining_path is not None: - if not Path(pretraining_path).exists(): - logger.warning( - f"skipped pretraining because {pretraining_path} does not exist" - ) - else: - encoder = checkpoint_utils.load_pretrained_component_from_model( - component=encoder, checkpoint=pretraining_path - ) - logger.info(f"loaded pretrained 
encoder from: {pretraining_path}") - return encoder - - @classmethod - def build_decoder(cls, args, task, embed_tokens): - return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - - # make sure all arguments are present in older models - base_architecture(args) - - def build_embedding(dictionary, embed_dim): - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - return Embedding(num_embeddings, embed_dim, padding_idx) - - decoder_embed_tokens = build_embedding( - task.target_dictionary, args.decoder_embed_dim - ) - encoder = cls.build_encoder(args) - decoder = cls.build_decoder(args, task, decoder_embed_tokens) - return cls(encoder, decoder) - - def get_normalized_probs( - self, - net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], - log_probs: bool, - sample: Optional[Dict[str, Tensor]] = None, - ): - # net_output['encoder_out'] is a (B, T, D) tensor - lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample) - lprobs.batch_first = True - return lprobs - - def forward(self, src_tokens, src_lengths, prev_output_tokens): - """ - The forward method inherited from the base class has a **kwargs - argument in its input, which is not supported in torchscript. This - method overwrites the forward method definition without **kwargs. - """ - encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths) - decoder_out = self.decoder( - prev_output_tokens=prev_output_tokens, encoder_out=encoder_out - ) - return decoder_out - - -class S2TTransformerEncoder(FairseqEncoder): - """Speech-to-text Transformer encoder that consists of input subsampler and - Transformer encoder.""" - - def __init__(self, args): - super().__init__(None) - - self.encoder_freezing_updates = args.encoder_freezing_updates - self.num_updates = 0 - - self.dropout_module = FairseqDropout( - p=args.dropout, module_name=self.__class__.__name__ - ) - self.embed_scale = math.sqrt(args.encoder_embed_dim) - if args.no_scale_embedding: - self.embed_scale = 1.0 - self.padding_idx = 1 - - self.subsample = Conv1dSubsampler( - args.input_feat_per_channel * args.input_channels, - args.conv_channels, - args.encoder_embed_dim, - [int(k) for k in args.conv_kernel_sizes.split(",")], - ) - - self.embed_positions = PositionalEmbedding( - args.max_source_positions, args.encoder_embed_dim, self.padding_idx - ) - - self.transformer_layers = nn.ModuleList( - [TransformerEncoderLayer(args) for _ in range(args.encoder_layers)] - ) - if args.encoder_normalize_before: - self.layer_norm = LayerNorm(args.encoder_embed_dim) - else: - self.layer_norm = None - - def _forward(self, src_tokens, src_lengths, return_all_hiddens=False): - x, input_lengths = self.subsample(src_tokens, src_lengths) - x = self.embed_scale * x - - encoder_padding_mask = lengths_to_padding_mask(input_lengths) - positions = self.embed_positions(encoder_padding_mask).transpose(0, 1) - x += positions - x = self.dropout_module(x) - - encoder_states = [] - - for layer in self.transformer_layers: - x = layer(x, encoder_padding_mask) - if return_all_hiddens: - encoder_states.append(x) - - if self.layer_norm is not None: - x = self.layer_norm(x) - - return { - "encoder_out": [x], # T x B x C - "encoder_padding_mask": [encoder_padding_mask] if encoder_padding_mask.any() else [], # B x T - "encoder_embedding": [], # B x T x C - "encoder_states": encoder_states, # List[T x B x C] - "src_tokens": [], - "src_lengths": [], - } 
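The time dimension of the `encoder_out` tensors returned above is determined by `Conv1dSubsampler`: each stride-2 convolution maps a length `L` to `floor((L - 1) / 2 + 1)`. A minimal stand-alone sketch of that length bookkeeping (the frame count 100 is just an example value):

```python
import math


def subsampled_length(n_frames: int, n_conv_layers: int = 2) -> int:
    """Mirrors Conv1dSubsampler.get_out_seq_lens_tensor for a single length."""
    out = n_frames
    for _ in range(n_conv_layers):
        out = math.floor((out - 1) / 2 + 1)
    return out


# With the default conv_kernel_sizes="5,5" (two stride-2 layers): 100 -> 50 -> 25 frames,
# so encoder_out["encoder_out"][0] has shape (25, batch, encoder_embed_dim).
print(subsampled_length(100))  # 25
```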
- - def forward(self, src_tokens, src_lengths, return_all_hiddens=False): - if self.num_updates < self.encoder_freezing_updates: - with torch.no_grad(): - x = self._forward(src_tokens, src_lengths, - return_all_hiddens=return_all_hiddens) - else: - x = self._forward(src_tokens, src_lengths, - return_all_hiddens=return_all_hiddens) - return x - - def reorder_encoder_out(self, encoder_out, new_order): - new_encoder_out = ( - [] if len(encoder_out["encoder_out"]) == 0 - else [x.index_select(1, new_order) for x in encoder_out["encoder_out"]] - ) - - new_encoder_padding_mask = ( - [] if len(encoder_out["encoder_padding_mask"]) == 0 - else [x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"]] - ) - - new_encoder_embedding = ( - [] if len(encoder_out["encoder_embedding"]) == 0 - else [x.index_select(0, new_order) for x in encoder_out["encoder_embedding"]] - ) - - encoder_states = encoder_out["encoder_states"] - if len(encoder_states) > 0: - for idx, state in enumerate(encoder_states): - encoder_states[idx] = state.index_select(1, new_order) - - return { - "encoder_out": new_encoder_out, # T x B x C - "encoder_padding_mask": new_encoder_padding_mask, # B x T - "encoder_embedding": new_encoder_embedding, # B x T x C - "encoder_states": encoder_states, # List[T x B x C] - "src_tokens": [], # B x T - "src_lengths": [], # B x 1 - } - - def set_num_updates(self, num_updates): - super().set_num_updates(num_updates) - self.num_updates = num_updates - - -class TransformerDecoderScriptable(TransformerDecoder): - def extract_features( - self, - prev_output_tokens, - encoder_out: Optional[Dict[str, List[Tensor]]] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - full_context_alignment: bool = False, - alignment_layer: Optional[int] = None, - alignment_heads: Optional[int] = None, - ): - # call scriptable method from parent class - x, _ = self.extract_features_scriptable( - prev_output_tokens, - encoder_out, - incremental_state, - full_context_alignment, - alignment_layer, - alignment_heads, - ) - return x, None - - -@register_model_architecture(model_name="s2t_transformer", arch_name="s2t_transformer") -def base_architecture(args): - args.encoder_freezing_updates = getattr(args, "encoder_freezing_updates", 0) - # Convolutional subsampler - args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5") - args.conv_channels = getattr(args, "conv_channels", 1024) - # Transformer - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) - args.encoder_layers = getattr(args, "encoder_layers", 12) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) - args.decoder_ffn_embed_dim = getattr( - args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim - ) - args.decoder_layers = getattr(args, "decoder_layers", 6) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) - args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) - args.dropout = getattr(args, "dropout", 0.1) - args.attention_dropout = getattr(args, "attention_dropout", args.dropout) - args.activation_dropout = getattr(args, "activation_dropout", args.dropout) - args.activation_fn = getattr(args, 
"activation_fn", "relu") - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - args.share_decoder_input_output_embed = getattr( - args, "share_decoder_input_output_embed", False - ) - args.no_token_positional_embeddings = getattr( - args, "no_token_positional_embeddings", False - ) - args.adaptive_input = getattr(args, "adaptive_input", False) - args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) - args.decoder_output_dim = getattr( - args, "decoder_output_dim", args.decoder_embed_dim - ) - args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) - args.no_scale_embedding = getattr(args, "no_scale_embedding", False) - args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_s") -def s2t_transformer_s(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) - args.dropout = getattr(args, "dropout", 0.1) - base_architecture(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_xs") -def s2t_transformer_xs(args): - args.encoder_layers = getattr(args, "encoder_layers", 6) - args.decoder_layers = getattr(args, "decoder_layers", 3) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) - args.dropout = getattr(args, "dropout", 0.3) - s2t_transformer_s(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_sp") -def s2t_transformer_sp(args): - args.encoder_layers = getattr(args, "encoder_layers", 16) - s2t_transformer_s(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_m") -def s2t_transformer_m(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) - args.dropout = getattr(args, "dropout", 0.15) - base_architecture(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_mp") -def s2t_transformer_mp(args): - args.encoder_layers = getattr(args, "encoder_layers", 16) - s2t_transformer_m(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_l") -def s2t_transformer_l(args): - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) - args.dropout = getattr(args, "dropout", 0.2) - base_architecture(args) - - -@register_model_architecture("s2t_transformer", "s2t_transformer_lp") -def s2t_transformer_lp(args): - args.encoder_layers = getattr(args, "encoder_layers", 16) - s2t_transformer_l(args) diff --git a/spaces/OIUGLK/bingo/src/components/chat-message.tsx b/spaces/OIUGLK/bingo/src/components/chat-message.tsx deleted file mode 100644 index bf272d8d7005cfd06c53bd213e09ea217e803549..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/src/components/chat-message.tsx +++ /dev/null @@ -1,93 +0,0 @@ -import 
remarkGfm from 'remark-gfm' -import remarkMath from 'remark-math' -import supersub from 'remark-supersub' -import remarkBreaks from 'remark-breaks' -import { cn } from '@/lib/utils' -import { CodeBlock } from '@/components/ui/codeblock' -import { MemoizedReactMarkdown } from '@/components/markdown' -import { LearnMore } from './learn-more' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { useEffect } from 'react' -import { TurnCounter } from './turn-counter' - -export interface ChatMessageProps { - message: ChatMessageModel -} - -export function ChatMessage({ message, ...props }: ChatMessageProps) { - useEffect(() => { - if (document.body.scrollHeight - window.innerHeight - window.scrollY - 200 < 0) { - window.scrollBy(0, 200) - } - }, [message.text]) - - return message.text ? ( -
-    <div {...props}>
-      <MemoizedReactMarkdown
-        remarkPlugins={[remarkGfm, remarkMath, supersub, remarkBreaks]}
-        components={{
-          img(obj) {
-            try {
-              return <img src={obj.src} alt={obj.alt} />
-            } catch (e) {
-            }
-            return <img src={obj.src} alt={obj.alt} />
-          },
-          p({ children }) {
-            return <p>{children}</p>
-          },
-          code({ node, inline, className, children, ...props }) {
-            if (children.length) {
-              if (children[0] == '▍') {
-                return <span>▍</span>
-              }
-
-              children[0] = (children[0] as string).replace('`▍`', '▍')
-            }
-
-            const match = /language-(\w+)/.exec(className || '')
-
-            if (inline) {
-              return (
-                <code className={className} {...props}>
-                  {children}
-                </code>
-              )
-            }
-
-            return (
-              <CodeBlock
-                language={(match && match[1]) || ''}
-                value={String(children).replace(/\n$/, '')}
-                {...props}
-              />
-            )
-          }
-        }}
-      >
-        {message.text}
-      </MemoizedReactMarkdown>
-      <div>
-        {message.author === 'bot' && <LearnMore />}
-        {message.author === 'bot' && <TurnCounter />}
-      </div>
-    </div>
- ) : null -} diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/utils/timer.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/utils/timer.py deleted file mode 100644 index e3db7d497d8b374e18b5297e0a1d6eb186fd8cba..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/utils/timer.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from time import time - - -class TimerError(Exception): - - def __init__(self, message): - self.message = message - super(TimerError, self).__init__(message) - - -class Timer: - """A flexible Timer class. - - :Example: - - >>> import time - >>> import annotator.uniformer.mmcv as mmcv - >>> with mmcv.Timer(): - >>> # simulate a code block that will run for 1s - >>> time.sleep(1) - 1.000 - >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'): - >>> # simulate a code block that will run for 1s - >>> time.sleep(1) - it takes 1.0 seconds - >>> timer = mmcv.Timer() - >>> time.sleep(0.5) - >>> print(timer.since_start()) - 0.500 - >>> time.sleep(0.5) - >>> print(timer.since_last_check()) - 0.500 - >>> print(timer.since_start()) - 1.000 - """ - - def __init__(self, start=True, print_tmpl=None): - self._is_running = False - self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}' - if start: - self.start() - - @property - def is_running(self): - """bool: indicate whether the timer is running""" - return self._is_running - - def __enter__(self): - self.start() - return self - - def __exit__(self, type, value, traceback): - print(self.print_tmpl.format(self.since_last_check())) - self._is_running = False - - def start(self): - """Start the timer.""" - if not self._is_running: - self._t_start = time() - self._is_running = True - self._t_last = time() - - def since_start(self): - """Total time since the timer is started. - - Returns (float): Time in seconds. - """ - if not self._is_running: - raise TimerError('timer is not running') - self._t_last = time() - return self._t_last - self._t_start - - def since_last_check(self): - """Time since the last checking. - - Either :func:`since_start` or :func:`since_last_check` is a checking - operation. - - Returns (float): Time in seconds. - """ - if not self._is_running: - raise TimerError('timer is not running') - dur = time() - self._t_last - self._t_last = time() - return dur - - -_g_timers = {} # global timers - - -def check_time(timer_id): - """Add check points in a single line. - - This method is suitable for running a task on a list of items. A timer will - be registered when the method is called for the first time. - - :Example: - - >>> import time - >>> import annotator.uniformer.mmcv as mmcv - >>> for i in range(1, 6): - >>> # simulate a code block - >>> time.sleep(i) - >>> mmcv.check_time('task1') - 2.000 - 3.000 - 4.000 - 5.000 - - Args: - timer_id (str): Timer identifier. 
- """ - if timer_id not in _g_timers: - _g_timers[timer_id] = Timer() - return 0 - else: - return _g_timers[timer_id].since_last_check() diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/raft/alt_cuda_corr/correlation.cpp b/spaces/PKUWilliamYang/VToonify/vtoonify/model/raft/alt_cuda_corr/correlation.cpp deleted file mode 100644 index b01584d19edb99e7feec5f2e4c51169a1ed208db..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/raft/alt_cuda_corr/correlation.cpp +++ /dev/null @@ -1,54 +0,0 @@ -#include -#include - -// CUDA forward declarations -std::vector corr_cuda_forward( - torch::Tensor fmap1, - torch::Tensor fmap2, - torch::Tensor coords, - int radius); - -std::vector corr_cuda_backward( - torch::Tensor fmap1, - torch::Tensor fmap2, - torch::Tensor coords, - torch::Tensor corr_grad, - int radius); - -// C++ interface -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -std::vector corr_forward( - torch::Tensor fmap1, - torch::Tensor fmap2, - torch::Tensor coords, - int radius) { - CHECK_INPUT(fmap1); - CHECK_INPUT(fmap2); - CHECK_INPUT(coords); - - return corr_cuda_forward(fmap1, fmap2, coords, radius); -} - - -std::vector corr_backward( - torch::Tensor fmap1, - torch::Tensor fmap2, - torch::Tensor coords, - torch::Tensor corr_grad, - int radius) { - CHECK_INPUT(fmap1); - CHECK_INPUT(fmap2); - CHECK_INPUT(coords); - CHECK_INPUT(corr_grad); - - return corr_cuda_backward(fmap1, fmap2, coords, corr_grad, radius); -} - - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("forward", &corr_forward, "CORR forward"); - m.def("backward", &corr_backward, "CORR backward"); -} \ No newline at end of file diff --git a/spaces/Pavankunchala/Depth-Estimation-App/app.py b/spaces/Pavankunchala/Depth-Estimation-App/app.py deleted file mode 100644 index 66fc9bf16756c12b85673b290dcd7830ab95b12d..0000000000000000000000000000000000000000 --- a/spaces/Pavankunchala/Depth-Estimation-App/app.py +++ /dev/null @@ -1,146 +0,0 @@ -import sys -import time -from pathlib import Path -import cv2 -from openvino.inference_engine import IECore -import matplotlib.cm -import matplotlib.pyplot as plt -import numpy as np -import streamlit as st -from PIL import Image -import tempfile -DEMO_IMAGE = 'dog-new.jpg' -DEMO_VIDEO = 'dance2.mp4' -@st.cache -def normalize_minmax(data): - - return (data - data.min()) / (data.max() - data.min()) -@st.cache -def convert_result_to_image(result, colormap="inferno"): - - cmap = matplotlib.cm.get_cmap(colormap) - result = result.squeeze(0) - result = normalize_minmax(result) - result = cmap(result)[:, :, :3] * 255 - result = result.astype(np.uint8) - return result -@st.cache -def to_rgb(image_data) -> np.ndarray: - - return cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB) -st.title("Depth Estimation App") -st.sidebar.title('Depth Estimation') -st.sidebar.subheader('Parameters') -DEVICE = "CPU" -MODEL_FILE = "models/MiDaS_small.xml" -model_xml_path = Path(MODEL_FILE) -ie = IECore() -net = ie.read_network(model=model_xml_path, weights=model_xml_path.with_suffix(".bin")) -exec_net = ie.load_network(network=net, device_name=DEVICE) -input_key = list(exec_net.input_info)[0] -output_key = list(exec_net.outputs.keys())[0] -network_input_shape = exec_net.input_info[input_key].tensor_desc.dims -network_image_height, network_image_width = network_input_shape[2:] -app_mode = 
st.sidebar.selectbox('Choose the App mode', -['Run on Image','Run on Video'],index = 0) -if app_mode == "Run on Image": - st.markdown('Running on Image') - st.sidebar.text('Params for Image') - st.markdown( - """ - - """, - unsafe_allow_html=True, - ) - img_file_buffer = st.sidebar.file_uploader("Upload an image", type=[ "jpg", "jpeg",'png']) - if img_file_buffer is not None: - image = np.array(Image.open(img_file_buffer)) - else: - demo_image = DEMO_IMAGE - image = np.array(Image.open(demo_image)) - st.sidebar.text('Original Image') - st.sidebar.image(image) - resized_image = cv2.resize(src=image, dsize=(network_image_height, network_image_width)) - # reshape image to network input shape NCHW - input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0) - result = exec_net.infer(inputs={input_key: input_image})[output_key] - # convert network result of disparity map to an image that shows - # distance as colors - result_image = convert_result_to_image(result=result) - # resize back to original image shape. cv2.resize expects shape - # in (width, height), [::-1] reverses the (height, width) shape to match this. - result_image = cv2.resize(result_image, image.shape[:2][::-1]) - st.subheader('Output Image') - st.image(result_image,use_column_width= True) -if app_mode =='Run on Video': - st.markdown('Running on Video') - - video_file_buffer = st.sidebar.file_uploader("Upload a video", type=[ "mp4", "mov",'avi','asf', 'm4v' ]) - tfflie = tempfile.NamedTemporaryFile(delete=False) - stop_button = st.sidebar.button('Stop Processing') - if stop_button: - st.stop() - if not video_file_buffer: - - vid = cv2.VideoCapture(DEMO_VIDEO) - tfflie.name = DEMO_VIDEO - - - else: - tfflie.write(video_file_buffer.read()) - vid = cv2.VideoCapture(tfflie.name) - - width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)) - height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = int(vid.get(cv2.CAP_PROP_FPS))#codec = cv2.VideoWriter_fourcc(*FLAGS.output_format) - codec = cv2.VideoWriter_fourcc('X','V','I','D') - out = cv2.VideoWriter('output_depth.mp4', codec, fps, (width, height)) - start_time = time.perf_counter() - total_inference_duration = 0 - stframe = st.empty() - SCALE_OUTPUT = 1 - st.markdown("**Frame Rate**") - kpi1_text = st.markdown("0") - save_video = st.checkbox('Save video') - while vid.isOpened(): - ret, image = vid.read() - new_time = time.time() - input_video_frame_height, input_video_frame_width = image.shape[:2] - target_frame_height = int(input_video_frame_height * SCALE_OUTPUT) - target_frame_width = int(input_video_frame_width * SCALE_OUTPUT) - if not ret: - vid.release() - break - resized_image = cv2.resize(src=image, dsize=(network_image_height, network_image_width)) - # reshape image to network input shape NCHW - input_image = np.expand_dims(np.transpose(resized_image, (2, 0, 1)), 0) - inference_start_time = time.perf_counter() - result = exec_net.infer(inputs={input_key: input_image})[output_key] - inference_stop_time = time.perf_counter() - inference_duration = inference_stop_time - inference_start_time - total_inference_duration += inference_duration - result_frame = to_rgb(convert_result_to_image(result)) - # Resize image and result to target frame shape - result_frame = cv2.resize(result_frame, (target_frame_width, target_frame_height)) - image = cv2.resize(image, (target_frame_width, target_frame_height)) - # Put image and result side by side - stacked_frame = np.hstack((image, result_frame)) - if save_video: - out.write(stacked_frame) - stframe.image(stacked_frame,channels = 
'BGR',use_column_width=True) - fps = 1.0/(time.time() - new_time) - kpi1_text.write(f"{'{:.1f}'.format(fps)}
", unsafe_allow_html=True) - - vid.release() - out.release() - cv2.destroyAllWindows() - st.success('Video is Processed') - st.stop() \ No newline at end of file diff --git a/spaces/Plurigrid/LifeSim/src/components/ui/collapsible.tsx b/spaces/Plurigrid/LifeSim/src/components/ui/collapsible.tsx deleted file mode 100644 index 9fa48946afd1eb56bd932377fd888e3986304676..0000000000000000000000000000000000000000 --- a/spaces/Plurigrid/LifeSim/src/components/ui/collapsible.tsx +++ /dev/null @@ -1,11 +0,0 @@ -"use client" - -import * as CollapsiblePrimitive from "@radix-ui/react-collapsible" - -const Collapsible = CollapsiblePrimitive.Root - -const CollapsibleTrigger = CollapsiblePrimitive.CollapsibleTrigger - -const CollapsibleContent = CollapsiblePrimitive.CollapsibleContent - -export { Collapsible, CollapsibleTrigger, CollapsibleContent } diff --git a/spaces/Podtekatel/ArcaneSVK2/inference/model_pipeline.py b/spaces/Podtekatel/ArcaneSVK2/inference/model_pipeline.py deleted file mode 100644 index d03117f9e420367e0733f64ff046c178f147bfbe..0000000000000000000000000000000000000000 --- a/spaces/Podtekatel/ArcaneSVK2/inference/model_pipeline.py +++ /dev/null @@ -1,115 +0,0 @@ -import logging -import time - -import cv2 -import numpy as np - -from .center_crop import center_crop -from .face_detector import FaceDetector - - -class VSNetModelPipeline: - def __init__(self, model, face_detector: FaceDetector, background_resize=720, no_detected_resize=256, use_cloning=True): - self.background_resize = background_resize - self.no_detected_resize = no_detected_resize - self.model = model - self.face_detector = face_detector - self.mask = self.create_circular_mask(face_detector.target_size, face_detector.target_size) - self.use_cloning = use_cloning - - @staticmethod - def create_circular_mask(h, w, power=None, clipping_coef=0.85): - center = (int(w / 2), int(h / 2)) - - Y, X = np.ogrid[:h, :w] - dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2) - print(dist_from_center.max(), dist_from_center.min()) - clipping_radius = min((h - center[0]), (w - center[1])) * clipping_coef - max_size = max((h - center[0]), (w - center[1])) - dist_from_center[dist_from_center < clipping_radius] = clipping_radius - dist_from_center[dist_from_center > max_size] = max_size - max_distance, min_distance = np.max(dist_from_center), np.min(dist_from_center) - dist_from_center = 1 - (dist_from_center - min_distance) / (max_distance - min_distance) - if power is not None: - dist_from_center = np.power(dist_from_center, power) - dist_from_center = np.stack([dist_from_center] * 3, axis=2) - # mask = dist_from_center <= radius - return dist_from_center - - - @staticmethod - def resize_size(image, size=720, always_apply=True): - h, w, c = np.shape(image) - if min(h, w) > size or always_apply: - if h < w: - h, w = int(size * h / w), size - else: - h, w = size, int(size * w / h) - image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA) - return image - - def normalize(self, img): - img = img.astype(np.float32) / 255 * 2 - 1 - return img - - def denormalize(self, img): - return (img + 1) / 2 - - def divide_crop(self, img, must_divided=32): - h, w, _ = img.shape - h = h // must_divided * must_divided - w = w // must_divided * must_divided - - img = center_crop(img, h, w) - return img - - def merge_crops(self, faces_imgs, crops, full_image): - for face, crop in zip(faces_imgs, crops): - x1, y1, x2, y2 = crop - W, H = x2 - x1, y2 - y1 - result_face = cv2.resize(face, (W, H), interpolation=cv2.INTER_LINEAR) - face_mask 
= cv2.resize(self.mask, (W, H), interpolation=cv2.INTER_LINEAR) - if self.use_cloning: - center = round((x2 + x1) / 2), round((y2 + y1) / 2) - full_image = cv2.seamlessClone(result_face, full_image, (face_mask > 0.0).astype(np.uint8) * 255, center, cv2.NORMAL_CLONE) - else: - input_face = full_image[y1: y2, x1: x2] - full_image[y1: y2, x1: x2] = (result_face * face_mask + input_face * (1 - face_mask)).astype(np.uint8) - return full_image - - def __call__(self, img): - return self.process_image(img) - - def process_image(self, img): - img = self.resize_size(img, size=self.background_resize) - img = self.divide_crop(img) - - face_crops, coords = self.face_detector(img) - - if len(face_crops) > 0: - start_time = time.time() - faces = self.normalize(face_crops) - faces = faces.transpose(0, 3, 1, 2) - out_faces = self.model(faces) - out_faces = self.denormalize(out_faces) - out_faces = out_faces.transpose(0, 2, 3, 1) - out_faces = np.clip(out_faces * 255, 0, 255).astype(np.uint8) - end_time = time.time() - logging.info(f'Face FPS {1 / (end_time - start_time)}') - else: - out_faces = [] - img = self.resize_size(img, size=self.no_detected_resize) - img = self.divide_crop(img) - - start_time = time.time() - full_image = self.normalize(img) - full_image = np.expand_dims(full_image, 0).transpose(0, 3, 1, 2) - full_image = self.model(full_image) - full_image = self.denormalize(full_image) - full_image = full_image.transpose(0, 2, 3, 1) - full_image = np.clip(full_image * 255, 0, 255).astype(np.uint8) - end_time = time.time() - logging.info(f'Background FPS {1 / (end_time - start_time)}') - - result_image = self.merge_crops(out_faces, coords, full_image[0]) - return result_image diff --git a/spaces/Pranjal12345/Text_to_Speech/tortoise/read.py b/spaces/Pranjal12345/Text_to_Speech/tortoise/read.py deleted file mode 100644 index e5839aa89522d4770ab3f53ef2aca5b7eb7eac84..0000000000000000000000000000000000000000 --- a/spaces/Pranjal12345/Text_to_Speech/tortoise/read.py +++ /dev/null @@ -1,101 +0,0 @@ -import argparse -import os -from time import time - -import torch -import torchaudio - -from api import TextToSpeech, MODELS_DIR -from utils.audio import load_audio, load_voices -from utils.text import split_and_recombine_text - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--textfile', type=str, help='A file containing the text to read.', default="tortoise/data/riding_hood.txt") - parser.add_argument('--voice', type=str, help='Selects the voice to use for generation. See options in voices/ directory (and add your own!) ' - 'Use the & character to join two voices together. Use a comma to perform inference on multiple voices.', default='pat') - parser.add_argument('--output_path', type=str, help='Where to store outputs.', default='results/longform/') - parser.add_argument('--output_name', type=str, help='How to name the output file', default='combined.wav') - parser.add_argument('--preset', type=str, help='Which voice preset to use.', default='standard') - parser.add_argument('--regenerate', type=str, help='Comma-separated list of clip numbers to re-generate, or nothing.', default=None) - parser.add_argument('--candidates', type=int, help='How many output candidates to produce per-voice. Only the first candidate is actually used in the final product, the others can be used manually.', default=1) - parser.add_argument('--model_dir', type=str, help='Where to find pretrained model checkpoints. 
Tortoise automatically downloads these to .models, so this' - 'should only be specified if you have custom checkpoints.', default=MODELS_DIR) - parser.add_argument('--seed', type=int, help='Random seed which can be used to reproduce results.', default=None) - parser.add_argument('--produce_debug_state', type=bool, help='Whether or not to produce debug_state.pth, which can aid in reproducing problems. Defaults to true.', default=True) - parser.add_argument('--use_deepspeed', type=bool, help='Use deepspeed for speed bump.', default=False) - parser.add_argument('--kv_cache', type=bool, help='If you disable this please wait for a long a time to get the output', default=True) - parser.add_argument('--half', type=bool, help="float16(half) precision inference if True it's faster and take less vram and ram", default=True) - - - args = parser.parse_args() - if torch.backends.mps.is_available(): - args.use_deepspeed = False - tts = TextToSpeech(models_dir=args.model_dir, use_deepspeed=args.use_deepspeed, kv_cache=args.kv_cache, half=args.half) - - outpath = args.output_path - outname = args.output_name - selected_voices = args.voice.split(',') - regenerate = args.regenerate - if regenerate is not None: - regenerate = [int(e) for e in regenerate.split(',')] - - # Process text - with open(args.textfile, 'r', encoding='utf-8') as f: - text = ' '.join([l for l in f.readlines()]) - if '|' in text: - print("Found the '|' character in your text, which I will use as a cue for where to split it up. If this was not" - "your intent, please remove all '|' characters from the input.") - texts = text.split('|') - else: - texts = split_and_recombine_text(text) - - seed = int(time()) if args.seed is None else args.seed - for selected_voice in selected_voices: - voice_outpath = os.path.join(outpath, selected_voice) - os.makedirs(voice_outpath, exist_ok=True) - - if '&' in selected_voice: - voice_sel = selected_voice.split('&') - else: - voice_sel = [selected_voice] - - voice_samples, conditioning_latents = load_voices(voice_sel) - all_parts = [] - for j, text in enumerate(texts): - if regenerate is not None and j not in regenerate: - all_parts.append(load_audio(os.path.join(voice_outpath, f'{j}.wav'), 24000)) - continue - gen = tts.tts_with_preset(text, voice_samples=voice_samples, conditioning_latents=conditioning_latents, - preset=args.preset, k=args.candidates, use_deterministic_seed=seed) - if args.candidates == 1: - audio_ = gen.squeeze(0).cpu() - torchaudio.save(os.path.join(voice_outpath, f'{j}.wav'), audio_, 24000) - else: - candidate_dir = os.path.join(voice_outpath, str(j)) - os.makedirs(candidate_dir, exist_ok=True) - for k, g in enumerate(gen): - torchaudio.save(os.path.join(candidate_dir, f'{k}.wav'), g.squeeze(0).cpu(), 24000) - audio_ = gen[0].squeeze(0).cpu() - all_parts.append(audio_) - - if args.candidates == 1: - full_audio = torch.cat(all_parts, dim=-1) - torchaudio.save(os.path.join(voice_outpath, f"{outname}.wav"), full_audio, 24000) - - if args.produce_debug_state: - os.makedirs('debug_states', exist_ok=True) - dbg_state = (seed, texts, voice_samples, conditioning_latents) - torch.save(dbg_state, f'debug_states/read_debug_{selected_voice}.pth') - - # Combine each candidate's audio clips. 
- if args.candidates > 1: - audio_clips = [] - for candidate in range(args.candidates): - for line in range(len(texts)): - wav_file = os.path.join(voice_outpath, str(line), f"{candidate}.wav") - audio_clips.append(load_audio(wav_file, 24000)) - audio_clips = torch.cat(audio_clips, dim=-1) - torchaudio.save(os.path.join(voice_outpath, f"{outname}_{candidate:02d}.wav"), audio_clips, 24000) - audio_clips = [] diff --git a/spaces/RMXK/RVC_HFF/gui_v1.py b/spaces/RMXK/RVC_HFF/gui_v1.py deleted file mode 100644 index becba80cdda6987c1ad70c89e68a4e3a4da44639..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/gui_v1.py +++ /dev/null @@ -1,708 +0,0 @@ -import os -import logging -import sys -from dotenv import load_dotenv - -load_dotenv() - -os.environ["OMP_NUM_THREADS"] = "4" -if sys.platform == "darwin": - os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" - -now_dir = os.getcwd() -sys.path.append(now_dir) -import multiprocessing - -logger = logging.getLogger(__name__) - - -class Harvest(multiprocessing.Process): - def __init__(self, inp_q, opt_q): - multiprocessing.Process.__init__(self) - self.inp_q = inp_q - self.opt_q = opt_q - - def run(self): - import numpy as np - import pyworld - - while 1: - idx, x, res_f0, n_cpu, ts = self.inp_q.get() - f0, t = pyworld.harvest( - x.astype(np.double), - fs=16000, - f0_ceil=1100, - f0_floor=50, - frame_period=10, - ) - res_f0[idx] = f0 - if len(res_f0.keys()) >= n_cpu: - self.opt_q.put(ts) - - -if __name__ == "__main__": - import json - import multiprocessing - import re - import threading - import time - import traceback - from multiprocessing import Queue, cpu_count - from queue import Empty - - import librosa - from tools.torchgate import TorchGate - import numpy as np - import PySimpleGUI as sg - import sounddevice as sd - import torch - import torch.nn.functional as F - import torchaudio.transforms as tat - - import tools.rvc_for_realtime as rvc_for_realtime - from i18n.i18n import I18nAuto - - i18n = I18nAuto() - device = rvc_for_realtime.config.device - # device = torch.device( - # "cuda" - # if torch.cuda.is_available() - # else ("mps" if torch.backends.mps.is_available() else "cpu") - # ) - current_dir = os.getcwd() - inp_q = Queue() - opt_q = Queue() - n_cpu = min(cpu_count(), 8) - for _ in range(n_cpu): - Harvest(inp_q, opt_q).start() - - class GUIConfig: - def __init__(self) -> None: - self.pth_path: str = "" - self.index_path: str = "" - self.pitch: int = 0 - self.samplerate: int = 40000 - self.block_time: float = 1.0 # s - self.buffer_num: int = 1 - self.threhold: int = -60 - self.crossfade_time: float = 0.04 - self.extra_time: float = 2.0 - self.I_noise_reduce = False - self.O_noise_reduce = False - self.rms_mix_rate = 0.0 - self.index_rate = 0.3 - self.n_cpu = min(n_cpu, 6) - self.f0method = "harvest" - self.sg_input_device = "" - self.sg_output_device = "" - - class GUI: - def __init__(self) -> None: - self.config = GUIConfig() - self.flag_vc = False - - self.launcher() - - def load(self): - input_devices, output_devices, _, _ = self.get_devices() - try: - with open("configs/config.json", "r") as j: - data = json.load(j) - data["pm"] = data["f0method"] == "pm" - data["harvest"] = data["f0method"] == "harvest" - data["crepe"] = data["f0method"] == "crepe" - data["rmvpe"] = data["f0method"] == "rmvpe" - except: - with open("configs/config.json", "w") as j: - data = { - "pth_path": " ", - "index_path": " ", - "sg_input_device": input_devices[sd.default.device[0]], - "sg_output_device": output_devices[sd.default.device[1]], - 
"threhold": "-60", - "pitch": "0", - "index_rate": "0", - "rms_mix_rate": "0", - "block_time": "0.25", - "crossfade_length": "0.04", - "extra_time": "2", - "f0method": "rmvpe", - } - data["pm"] = data["f0method"] == "pm" - data["harvest"] = data["f0method"] == "harvest" - data["crepe"] = data["f0method"] == "crepe" - data["rmvpe"] = data["f0method"] == "rmvpe" - return data - - def launcher(self): - data = self.load() - sg.theme("LightBlue3") - input_devices, output_devices, _, _ = self.get_devices() - layout = [ - [ - sg.Frame( - title=i18n("加载模型"), - layout=[ - [ - sg.Input( - default_text=data.get("pth_path", ""), - key="pth_path", - ), - sg.FileBrowse( - i18n("选择.pth文件"), - initial_folder=os.path.join( - os.getcwd(), "assets/weights" - ), - file_types=((". pth"),), - ), - ], - [ - sg.Input( - default_text=data.get("index_path", ""), - key="index_path", - ), - sg.FileBrowse( - i18n("选择.index文件"), - initial_folder=os.path.join(os.getcwd(), "logs"), - file_types=((". index"),), - ), - ], - ], - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("输入设备")), - sg.Combo( - input_devices, - key="sg_input_device", - default_value=data.get("sg_input_device", ""), - ), - ], - [ - sg.Text(i18n("输出设备")), - sg.Combo( - output_devices, - key="sg_output_device", - default_value=data.get("sg_output_device", ""), - ), - ], - [sg.Button(i18n("重载设备列表"), key="reload_devices")], - ], - title=i18n("音频设备(请使用同种类驱动)"), - ) - ], - [ - sg.Frame( - layout=[ - [ - sg.Text(i18n("响应阈值")), - sg.Slider( - range=(-60, 0), - key="threhold", - resolution=1, - orientation="h", - default_value=data.get("threhold", "-60"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("音调设置")), - sg.Slider( - range=(-24, 24), - key="pitch", - resolution=1, - orientation="h", - default_value=data.get("pitch", "0"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("Index Rate")), - sg.Slider( - range=(0.0, 1.0), - key="index_rate", - resolution=0.01, - orientation="h", - default_value=data.get("index_rate", "0"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("响度因子")), - sg.Slider( - range=(0.0, 1.0), - key="rms_mix_rate", - resolution=0.01, - orientation="h", - default_value=data.get("rms_mix_rate", "0"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("音高算法")), - sg.Radio( - "pm", - "f0method", - key="pm", - default=data.get("pm", "") == True, - enable_events=True, - ), - sg.Radio( - "harvest", - "f0method", - key="harvest", - default=data.get("harvest", "") == True, - enable_events=True, - ), - sg.Radio( - "crepe", - "f0method", - key="crepe", - default=data.get("crepe", "") == True, - enable_events=True, - ), - sg.Radio( - "rmvpe", - "f0method", - key="rmvpe", - default=data.get("rmvpe", "") == True, - enable_events=True, - ), - ], - ], - title=i18n("常规设置"), - ), - sg.Frame( - layout=[ - [ - sg.Text(i18n("采样长度")), - sg.Slider( - range=(0.05, 2.4), - key="block_time", - resolution=0.01, - orientation="h", - default_value=data.get("block_time", "0.25"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("harvest进程数")), - sg.Slider( - range=(1, n_cpu), - key="n_cpu", - resolution=1, - orientation="h", - default_value=data.get( - "n_cpu", min(self.config.n_cpu, n_cpu) - ), - enable_events=True, - ), - ], - [ - sg.Text(i18n("淡入淡出长度")), - sg.Slider( - range=(0.01, 0.15), - key="crossfade_length", - resolution=0.01, - orientation="h", - default_value=data.get("crossfade_length", "0.04"), - enable_events=True, - ), - ], - [ - sg.Text(i18n("额外推理时长")), - sg.Slider( - range=(0.05, 5.00), - key="extra_time", - resolution=0.01, - 
orientation="h", - default_value=data.get("extra_time", "2.0"), - enable_events=True, - ), - ], - [ - sg.Checkbox( - i18n("输入降噪"), - key="I_noise_reduce", - enable_events=True, - ), - sg.Checkbox( - i18n("输出降噪"), - key="O_noise_reduce", - enable_events=True, - ), - ], - ], - title=i18n("性能设置"), - ), - ], - [ - sg.Button(i18n("开始音频转换"), key="start_vc"), - sg.Button(i18n("停止音频转换"), key="stop_vc"), - sg.Text(i18n("推理时间(ms):")), - sg.Text("0", key="infer_time"), - ], - ] - self.window = sg.Window("RVC - GUI", layout=layout, finalize=True) - self.event_handler() - - def event_handler(self): - while True: - event, values = self.window.read() - if event == sg.WINDOW_CLOSED: - self.flag_vc = False - exit() - if event == "reload_devices": - prev_input = self.window["sg_input_device"].get() - prev_output = self.window["sg_output_device"].get() - input_devices, output_devices, _, _ = self.get_devices(update=True) - if prev_input not in input_devices: - self.config.sg_input_device = input_devices[0] - else: - self.config.sg_input_device = prev_input - self.window["sg_input_device"].Update(values=input_devices) - self.window["sg_input_device"].Update( - value=self.config.sg_input_device - ) - if prev_output not in output_devices: - self.config.sg_output_device = output_devices[0] - else: - self.config.sg_output_device = prev_output - self.window["sg_output_device"].Update(values=output_devices) - self.window["sg_output_device"].Update( - value=self.config.sg_output_device - ) - if event == "start_vc" and self.flag_vc == False: - if self.set_values(values) == True: - logger.info("Use CUDA: %s", torch.cuda.is_available()) - self.start_vc() - settings = { - "pth_path": values["pth_path"], - "index_path": values["index_path"], - "sg_input_device": values["sg_input_device"], - "sg_output_device": values["sg_output_device"], - "threhold": values["threhold"], - "pitch": values["pitch"], - "rms_mix_rate": values["rms_mix_rate"], - "index_rate": values["index_rate"], - "block_time": values["block_time"], - "crossfade_length": values["crossfade_length"], - "extra_time": values["extra_time"], - "n_cpu": values["n_cpu"], - "f0method": ["pm", "harvest", "crepe", "rmvpe"][ - [ - values["pm"], - values["harvest"], - values["crepe"], - values["rmvpe"], - ].index(True) - ], - } - with open("configs/config.json", "w") as j: - json.dump(settings, j) - if event == "stop_vc" and self.flag_vc == True: - self.flag_vc = False - - # Parameter hot update - if event == "threhold": - self.config.threhold = values["threhold"] - elif event == "pitch": - self.config.pitch = values["pitch"] - if hasattr(self, "rvc"): - self.rvc.change_key(values["pitch"]) - elif event == "index_rate": - self.config.index_rate = values["index_rate"] - if hasattr(self, "rvc"): - self.rvc.change_index_rate(values["index_rate"]) - elif event == "rms_mix_rate": - self.config.rms_mix_rate = values["rms_mix_rate"] - elif event in ["pm", "harvest", "crepe", "rmvpe"]: - self.config.f0method = event - elif event == "I_noise_reduce": - self.config.I_noise_reduce = values["I_noise_reduce"] - elif event == "O_noise_reduce": - self.config.O_noise_reduce = values["O_noise_reduce"] - elif event != "start_vc" and self.flag_vc == True: - # Other parameters do not support hot update - self.flag_vc = False - - def set_values(self, values): - if len(values["pth_path"].strip()) == 0: - sg.popup(i18n("请选择pth文件")) - return False - if len(values["index_path"].strip()) == 0: - sg.popup(i18n("请选择index文件")) - return False - pattern = re.compile("[^\x00-\x7F]+") - if 
pattern.findall(values["pth_path"]): - sg.popup(i18n("pth文件路径不可包含中文")) - return False - if pattern.findall(values["index_path"]): - sg.popup(i18n("index文件路径不可包含中文")) - return False - self.set_devices(values["sg_input_device"], values["sg_output_device"]) - self.config.pth_path = values["pth_path"] - self.config.index_path = values["index_path"] - self.config.threhold = values["threhold"] - self.config.pitch = values["pitch"] - self.config.block_time = values["block_time"] - self.config.crossfade_time = values["crossfade_length"] - self.config.extra_time = values["extra_time"] - self.config.I_noise_reduce = values["I_noise_reduce"] - self.config.O_noise_reduce = values["O_noise_reduce"] - self.config.rms_mix_rate = values["rms_mix_rate"] - self.config.index_rate = values["index_rate"] - self.config.n_cpu = values["n_cpu"] - self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][ - [ - values["pm"], - values["harvest"], - values["crepe"], - values["rmvpe"], - ].index(True) - ] - return True - - def start_vc(self): - torch.cuda.empty_cache() - self.flag_vc = True - self.rvc = rvc_for_realtime.RVC( - self.config.pitch, - self.config.pth_path, - self.config.index_path, - self.config.index_rate, - self.config.n_cpu, - inp_q, - opt_q, - device, - self.rvc if hasattr(self, "rvc") else None - ) - self.config.samplerate = self.rvc.tgt_sr - self.zc = self.rvc.tgt_sr // 100 - self.block_frame = int(np.round(self.config.block_time * self.config.samplerate / self.zc)) * self.zc - self.block_frame_16k = 160 * self.block_frame // self.zc - self.crossfade_frame = int(np.round(self.config.crossfade_time * self.config.samplerate / self.zc)) * self.zc - self.sola_search_frame = self.zc - self.extra_frame = int(np.round(self.config.extra_time * self.config.samplerate / self.zc)) * self.zc - self.input_wav: torch.Tensor = torch.zeros( - self.extra_frame - + self.crossfade_frame - + self.sola_search_frame - + self.block_frame, - device=device, - dtype=torch.float32, - ) - self.input_wav_res: torch.Tensor= torch.zeros(160 * self.input_wav.shape[0] // self.zc, device=device,dtype=torch.float32) - self.pitch: np.ndarray = np.zeros( - self.input_wav.shape[0] // self.zc, - dtype="int32", - ) - self.pitchf: np.ndarray = np.zeros( - self.input_wav.shape[0] // self.zc, - dtype="float64", - ) - self.sola_buffer: torch.Tensor = torch.zeros( - self.crossfade_frame, device=device, dtype=torch.float32 - ) - self.nr_buffer: torch.Tensor = self.sola_buffer.clone() - self.output_buffer: torch.Tensor = self.input_wav.clone() - self.res_buffer: torch.Tensor = torch.zeros(2 * self.zc, device=device,dtype=torch.float32) - self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0] - self.fade_in_window: torch.Tensor = ( - torch.sin( - 0.5 - * np.pi - * torch.linspace( - 0.0, - 1.0, - steps=self.crossfade_frame, - device=device, - dtype=torch.float32, - ) - ) - ** 2 - ) - self.fade_out_window: torch.Tensor = 1 - self.fade_in_window - self.resampler = tat.Resample( - orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32 - ).to(device) - self.tg = TorchGate(sr=self.config.samplerate, n_fft=4*self.zc, prop_decrease=0.9).to(device) - thread_vc = threading.Thread(target=self.soundinput) - thread_vc.start() - - def soundinput(self): - """ - 接受音频输入 - """ - channels = 1 if sys.platform == "darwin" else 2 - with sd.Stream( - channels=channels, - callback=self.audio_callback, - blocksize=self.block_frame, - samplerate=self.config.samplerate, - dtype="float32", - ): - while self.flag_vc: - 
time.sleep(self.config.block_time) - logger.debug("Audio block passed.") - logger.debug("ENDing VC") - - def audio_callback( - self, indata: np.ndarray, outdata: np.ndarray, frames, times, status - ): - """ - 音频处理 - """ - start_time = time.perf_counter() - indata = librosa.to_mono(indata.T) - if self.config.threhold > -60: - rms = librosa.feature.rms( - y=indata, frame_length=4*self.zc, hop_length=self.zc - ) - db_threhold = ( - librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold - ) - for i in range(db_threhold.shape[0]): - if db_threhold[i]: - indata[i * self.zc : (i + 1) * self.zc] = 0 - self.input_wav[: -self.block_frame] = self.input_wav[self.block_frame :].clone() - self.input_wav[-self.block_frame: ] = torch.from_numpy(indata).to(device) - self.input_wav_res[ : -self.block_frame_16k] = self.input_wav_res[self.block_frame_16k :].clone() - # input noise reduction and resampling - if self.config.I_noise_reduce: - input_wav = self.input_wav[-self.crossfade_frame -self.block_frame-2*self.zc: ] - input_wav = self.tg(input_wav.unsqueeze(0), self.input_wav.unsqueeze(0))[0, 2*self.zc:] - input_wav[: self.crossfade_frame] *= self.fade_in_window - input_wav[: self.crossfade_frame] += self.nr_buffer * self.fade_out_window - self.nr_buffer[:] = input_wav[-self.crossfade_frame: ] - input_wav = torch.cat((self.res_buffer[:], input_wav[: self.block_frame])) - self.res_buffer[:] = input_wav[-2*self.zc: ] - self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(input_wav)[160: ] - else: - self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(self.input_wav[-self.block_frame-2*self.zc: ])[160: ] - # infer - f0_extractor_frame = self.block_frame_16k + 800 - if self.config.f0method == 'rmvpe': - f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1) - infer_wav = self.rvc.infer( - self.input_wav_res, - self.input_wav_res[-f0_extractor_frame :].cpu().numpy(), - self.block_frame_16k, - self.valid_rate, - self.pitch, - self.pitchf, - self.config.f0method, - ) - infer_wav = infer_wav[ - -self.crossfade_frame - self.sola_search_frame - self.block_frame : - ] - # output noise reduction - if self.config.O_noise_reduce: - self.output_buffer[: -self.block_frame] = self.output_buffer[self.block_frame :].clone() - self.output_buffer[-self.block_frame: ] = infer_wav[-self.block_frame:] - infer_wav = self.tg(infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)).squeeze(0) - # volume envelop mixing - if self.config.rms_mix_rate < 1: - rms1 = librosa.feature.rms( - y=self.input_wav_res[-160*infer_wav.shape[0]//self.zc :].cpu().numpy(), - frame_length=640, - hop_length=160, - ) - rms1 = torch.from_numpy(rms1).to(device) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True, - )[0,0,:-1] - rms2 = librosa.feature.rms( - y=infer_wav[:].cpu().numpy(), frame_length=4*self.zc, hop_length=self.zc - ) - rms2 = torch.from_numpy(rms2).to(device) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True, - )[0,0,:-1] - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3) - infer_wav *= torch.pow(rms1 / rms2, torch.tensor(1 - self.config.rms_mix_rate)) - # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC - conv_input = infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame] - cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :]) - cor_den = torch.sqrt( - F.conv1d(conv_input ** 2, torch.ones(1, 1, self.crossfade_frame, device=device)) + 1e-8) - if 
sys.platform == "darwin": - _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0]) - sola_offset = sola_offset.item() - else: - sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0]) - logger.debug("sola_offset = %d", int(sola_offset)) - infer_wav = infer_wav[sola_offset: sola_offset + self.block_frame + self.crossfade_frame] - infer_wav[: self.crossfade_frame] *= self.fade_in_window - infer_wav[: self.crossfade_frame] += self.sola_buffer *self.fade_out_window - self.sola_buffer[:] = infer_wav[-self.crossfade_frame:] - if sys.platform == "darwin": - outdata[:] = infer_wav[:-self.crossfade_frame].cpu().numpy()[:, np.newaxis] - else: - outdata[:] = infer_wav[:-self.crossfade_frame].repeat(2, 1).t().cpu().numpy() - total_time = time.perf_counter() - start_time - self.window["infer_time"].update(int(total_time * 1000)) - logger.info("Infer time: %.2f", total_time) - - def get_devices(self, update: bool = True): - """获取设备列表""" - if update: - sd._terminate() - sd._initialize() - devices = sd.query_devices() - hostapis = sd.query_hostapis() - for hostapi in hostapis: - for device_idx in hostapi["devices"]: - devices[device_idx]["hostapi_name"] = hostapi["name"] - input_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices = [ - f"{d['name']} ({d['hostapi_name']})" - for d in devices - if d["max_output_channels"] > 0 - ] - input_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_input_channels"] > 0 - ] - output_devices_indices = [ - d["index"] if "index" in d else d["name"] - for d in devices - if d["max_output_channels"] > 0 - ] - return ( - input_devices, - output_devices, - input_devices_indices, - output_devices_indices, - ) - - def set_devices(self, input_device, output_device): - """设置输出设备""" - ( - input_devices, - output_devices, - input_device_indices, - output_device_indices, - ) = self.get_devices() - sd.default.device[0] = input_device_indices[ - input_devices.index(input_device) - ] - sd.default.device[1] = output_device_indices[ - output_devices.index(output_device) - ] - logger.info( - "Input device: %s:%s", str(sd.default.device[0]), input_device - ) - logger.info( - "Output device: %s:%s", str(sd.default.device[1]), output_device - ) - - gui = GUI() \ No newline at end of file diff --git a/spaces/RTLAI/BLIPsinki/app.py b/spaces/RTLAI/BLIPsinki/app.py deleted file mode 100644 index bae7449f9dcbc0a2e5cebc21064df384b2f2b78a..0000000000000000000000000000000000000000 --- a/spaces/RTLAI/BLIPsinki/app.py +++ /dev/null @@ -1,56 +0,0 @@ -import numpy as np -import requests -import gradio as gr -from transformers import pipeline - - - -def getModelPath(language): - if language == 'English': - path = None - elif language == 'German': - path = "Helsinki-NLP/opus-mt-en-de" - elif language == 'French': - path = "Helsinki-NLP/opus-mt-en-fr" - elif language == 'Spanish': - path = "Helsinki-NLP/opus-mt-en-es" - elif language == 'Chinese': - path = "Helsinki-NLP/opus-mt-en-zh" - elif language == 'Ukranian': - path = "Helsinki-NLP/opus-mt-en-uk" - elif language == 'Swedish': - path = "Helsinki-NLP/opus-mt-en-sv" - elif language == 'Arabic': - path = "Helsinki-NLP/opus-mt-en-ar" - elif language == 'Italian': - path = "Helsinki-NLP/opus-mt-en-it" - elif language == 'Hindi': - path = "Helsinki-NLP/opus-mt-en-hi" - return(path) - -def blipsinki(input_img,strategy,language): - b64_string = gr.processing_utils.encode_url_or_file_to_base64(input_img) - response = 
requests.post(url='https://salesforce-blip.hf.space/api/predict', json={"data": [ b64_string,"Image Captioning","None",str(strategy)]}) - jres = response.json() - print(jres) - - cap = jres["data"][0] - modelpath = getModelPath(language) - if modelpath: - translator = pipeline("translation", model=modelpath) - trans_cap = translator(cap) - tc = trans_cap[0]['translation_text'] - return str(tc) - else: - return str(cap) - -description = "A pipeline of BLIP image captioning and Helsinki translation in order to generate image captions in a language of your choice either with beam search (deterministic) or nucleus sampling (stochastic). Enjoy! Is the language you want to use missing? Let me know and I'll integrate it." - - -inputs_ = [gr.inputs.Image(type='filepath', label="Input Image"),gr.inputs.Radio(choices=['Beam search','Nucleus sampling'], type="value", default="Nucleus sampling", label="Mode"), gr.inputs.Radio(choices=['English','German', 'French', 'Spanish', 'Chinese', 'Ukranian', 'Swedish', 'Arabic', 'Italian', 'Hindi'],type="value", default = 'German',label="Language")] - -outputs_ = gr.outputs.Textbox(label="Output") - -iface = gr.Interface(blipsinki, inputs_, outputs_, description=description) - -iface.launch(debug=True,show_error=True) \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/utils/_log.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/utils/_log.py deleted file mode 100644 index 92c4c6a193873ce09629f6cfaa2dabc4f14ecb03..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/utils/_log.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Customize logging - -Defines custom logger class for the `logger.verbose(...)` method. - -init_logging() must be called before any other modules that call logging.getLogger. -""" - -import logging -from typing import Any, cast - -# custom log level for `--verbose` output -# between DEBUG and INFO -VERBOSE = 15 - - -class VerboseLogger(logging.Logger): - """Custom Logger, defining a verbose log-level - - VERBOSE is between INFO and DEBUG. - """ - - def verbose(self, msg: str, *args: Any, **kwargs: Any) -> None: - return self.log(VERBOSE, msg, *args, **kwargs) - - -def getLogger(name: str) -> VerboseLogger: - """logging.getLogger, but ensures our VerboseLogger class is returned""" - return cast(VerboseLogger, logging.getLogger(name)) - - -def init_logging() -> None: - """Register our VerboseLogger and VERBOSE log level. - - Should be called before any calls to getLogger(), - i.e. 
in pip._internal.__init__ - """ - logging.setLoggerClass(VerboseLogger) - logging.addLevelName(VERBOSE, "VERBOSE") diff --git a/spaces/ReThGe/Linet/rethge_torch.py b/spaces/ReThGe/Linet/rethge_torch.py deleted file mode 100644 index d01d648809c108af44eb09dfd3963158e68c6064..0000000000000000000000000000000000000000 --- a/spaces/ReThGe/Linet/rethge_torch.py +++ /dev/null @@ -1,1192 +0,0 @@ -## this file contains custom-code functions for pytorch deeplearning -# containing model training/eval func, results/image plot func, and other help_funcs too -# belongs to: rethge -# created data: 2023/07/02 - - -## imports -# torch related -import torch -from torch import nn -import torchvision -from torch.utils.data import DataLoader, Dataset -from torchvision import datasets, transforms - - -# data related - -import pandas as pd -from PIL import Image -import matplotlib.pyplot as plt - -from torchmetrics import ConfusionMatrix -from mlxtend.plotting import plot_confusion_matrix - -# system related -import os, gc -import shutil -import pathlib -from pathlib import Path -import random -from typing import Tuple, Dict, List -from timeit import default_timer as timer -from tqdm.auto import tqdm - - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— -# utils related funcs -def set_seeds(seed: int=42): - """Sets random sets for torch operations. - - Args: - seed (int, optional): Random seed to set. Defaults to 42. - """ - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - - -def device_picking(): - """ - if GPU is available, using GPU, otherwise use CPU - """ - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - print(f"Using {device} to DeepLearning") - return device - - -def check_cuda_cache_and_clean(clean: bool = False): - """ - run a cuda mem checking, and clean cache when needed - """ - os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" - os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb: 128" - - cached_tensor = torch.cuda.memory_allocated() /1024/1024 - total_cached = torch.cuda.memory_reserved() /1024/1024 - - print(f"current GPU memory occupied by tensors: {cached_tensor} Mb") - print(f"current GPU memory managed by the caching allocator: {total_cached} Mb") - print(f"rest GPU mem: {total_cached-cached_tensor} Mb\n") - - if clean: - gc.collect() - torch.cuda.empty_cache() - cached_tensor = torch.cuda.memory_allocated() /1024/1024 - total_cached = torch.cuda.memory_reserved() /1024/1024 - print(f"GPU memory occupied by tensors after clean: {cached_tensor} Mb") - print(f"GPU memory managed by the caching allocator after clean: {total_cached} Mb") -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— -# directory/file manipulate related funcs - -def walk_through_dir(dir_path: pathlib.Path): - """ - know about your dataset dir - """ - - for dirpath, dirname, filenames in os.walk(dir_path): - print(f"There are {len(dirname)} directories and {len(filenames)} images in '{dirpath}'.") - - -def rename_get_rid_of_txt_suffix(working_dir: str): - - ''' - working dir should only exist the one type of file, and no folder - ''' - - os.chdir(working_dir) - names=[] - for i in os.listdir(working_dir): - n = i.removesuffix('.txt') - names.append(n) - - for i, j in enumerate(os.listdir(working_dir)): - file_full_dir = f'{working_dir}\{j}' - rename = 
f'{working_dir}\{names[i]}' - os.rename(file_full_dir, rename) - - -def rename_suffix(working_dir: str, - suffix_to_add: str): - - """ - add suffix to all the file in a dir - """ - - for i in os.listdir(working_dir): - file_full_dir = f'{working_dir}\{i}' - rename = f'{file_full_dir}.{suffix_to_add}' - os.rename(file_full_dir, rename) - - -def copy_file_to_dir(working_dir: str, - aim_dir: str): - - """copy all the file to a dir""" - - os.chdir(working_dir) - for file in os.listdir(): - shutil.move(file, aim_dir) - - -def remove_unused_label(image_dir: str, - label_dir: str): - - """ - for object detection project data file management - remove un-used label - """ - - label_dir_list = list(Path(label_dir).glob('*.*')) - name_img = [] - count = 0 - - for i in os.listdir(image_dir): - - n = i.removesuffix('.jpg') - name_img.append(n) - - for names in label_dir_list: - if names.stem not in name_img: - os.remove(names) - count += 1 - print(f"removed {count} unused labels") - - -def find_missing_label(image_dir: str, - label_dir: str) -> list: - - """ - for object detection project data file management - find missed image label - """ - - # the stem name of label - label_stem = [] - image_stem = [] - dir_missing_label = [] - - for i in os.listdir(label_dir): - if i == 'classes.txt': - continue - n = i.removesuffix('.txt') - label_stem.append(n) - - for i in os.listdir(image_dir): - if i == 'classes.txt': - continue - n = i.removesuffix('.jpg') - image_stem.append(n) - - - a = [x for x in image_stem if x not in label_stem] - for i in a: - suffix = '.jpg' - i = f'{i}{suffix}' - dir = f'{image_dir}\\{i}' - dir_missing_label.append(Path(dir)) - - print(f"missing {len(dir_missing_label)} label") - - return dir_missing_label - - -def adding_nothing_label(image_dir: str, - label_dir: str): - - """ - for object detection project data file management - create empty txt file as 'nothing' label - """ - - label_name = [] - image_name = [] - - for i in os.listdir(label_dir): - if i == 'classes.txt': - continue - - nl = i.removesuffix('.txt') - label_name.append(nl) - - for i in os.listdir(image_dir): - if i == 'classes.txt': - continue - - nm = i.removesuffix('.jpg') - image_name.append(nm) - - compare = [x for x in image_name if x not in label_name] - print(f"missing {len(compare)} label\nimage number: {len(image_name)}\nlabel number: {len(label_name)}") - - for i in compare: - suffix = '.txt' - i = f'{i}{suffix}' - dir = f'{label_dir}\\{i}' - - with open(dir, 'w') as fb: - fb.close() - - if len(compare) == 0: - print(f"No label is missing in {label_dir}") - else: - print(f"now having {len(os.listdir(label_dir))} files in folder") - - -def find_classes(dir: str) -> Tuple[List[str], Dict[str, int]]: - """ - find the class folder names in a target dir - - example: - classname, class_dict = find_classes(dir) # [anode, cathode, nothing] - - """ - - classes = sorted(entry.name for entry in os.scandir(dir) if entry.is_dir()) - - if not classes: - raise FileNotFoundError(f"Couldn't find any classes in {dir}... 
please check file structure") - - class_to_idx = {class_name: i for i, class_name in enumerate(classes)} - - return classes, class_to_idx - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— - - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— -# plot related funcs -def plot_trans(img_path_list: List[str], # img_path_list = list(img_path.glob('*/*/*.jpg')) - transform: torchvision.transforms, - n: int = 3, - seed=None): - """ - select random img from a path list, and using transform, and visualize - - example: - img_path_list = list(img_path.glob('*/*/*.jpg')) - transform = transform.Compose([...]) - """ - - if seed: - random.seed(seed) - - random_img_path = random.sample(img_path_list, k=n) - for p in random_img_path: - with Image.open(p) as f: - fig, ax = plt.subplots(nrows=1, ncols=2) - ax[0].imshow(f) - ax[0].set_title(f"Origin size: {f.size}") - ax[0].axis(False) - - trans_img = transform(f).permute(1, 2, 0) # we need to change shape for plt - # hwc -> hwc - ax[1].imshow(trans_img) - ax[1].set_title(f"transformed img_shape\n: {trans_img.shape}") - ax[1].axis(False) - - fig.suptitle(f"Class name: {p.parent.stem}", fontsize=16) - - -def display_random_img(dataset: torch.utils.data.Dataset, - classes: List[str] = None, - n: int = 10, - display_shape: bool = True, - seed: int = None): - ''' - a func to display random img - - Args: - classes: list of classname, - n: numbers of img to show - ''' - - # nrow=2 - - # if not n % 2: - # ncol = int(n/2)+1 - # else: - # ncol = int(n/2) - - if n > 10: - n=10 - display_shape = False - print(f"too many pics to display, max to 10 for display purpose") - - if seed: - random.seed(seed) - - # get index of random samples - random_samples_idx = random.sample(range(len(dataset)), k=n) - - plt.figure(figsize=(16,8)) - - # loop through idx and plot - for i, sample_idx in enumerate(random_samples_idx): - image, label = dataset[sample_idx][0].permute(1,2,0), dataset[sample_idx][1] - - plt.subplot(1, n, i+1) - plt.imshow(image) - plt.axis(False) - - if classes: - title = f"Class: {classes[label]}" - if display_shape: - title += f"\nshape: {image.shape}" - plt.title(title) - - -def plot_lr(results: Dict[str, List[float]] or Path): - """ - this funcs plot a lr_scheduler's curve varying with epochs when a training is over - """ - - if type(results) != dict: - results = pd.read_csv(results) - results = results.iloc[:, 1:] # row, col - results = results.to_dict("list") - - else: - pass - - lr = results['learning rate'] - epochs = range(len(results['learning rate'])) - - plt.figure(figsize=(7,7)) - plt.plot(epochs, lr, label='learning rate') - plt.title('learning rate scheduler') - plt.xlabel('Epochs') - plt.legend() - - -def plot_loss_curves(results: Dict[str, List[float]] or Path): - """ - results is a dict and will be like: - {'train_loss': [...], - 'train_acc': [...], - 'test_loss': [...], - 'test_acc': [...]} - """ - if type(results) != dict: - results = pd.read_csv(results) - results = results.iloc[:, 1:] # row, col - results = results.to_dict("list") - - else: - pass - - loss = results['train_loss'] - test_loss = results['test_loss'] - - accuracy = results['train_acc'] - test_accuracy = results['test_acc'] - - epochs = range(len(results['train_loss'])) - - plt.figure(figsize=(15,7)) - - plt.subplot(1, 2, 1) - plt.plot(epochs, loss, label='train_loss') - plt.plot(epochs, test_loss, label='test_loss') - plt.title('Loss') - 
plt.xlabel('Epochs') - plt.legend() - - plt.subplot(1, 2, 2) - plt.plot(epochs, accuracy, label='train_acc') - plt.plot(epochs, test_accuracy, label='test_acc') - plt.title('Accuracy') - plt.xlabel('Epochs') - plt.legend() - - -def pred_single_img(Model: torch.nn.Module, - image_path: str, - class_name: List[str] = None, - transforms = None, - device: torch.device = torch.device('cpu') - ): - """ - show a img's pred results - """ - - image_done = torchvision.io.read_image(image_path).type(torch.float).to(device) / 255. - Model.to(device) - - if transforms: - image_done = transforms(image_done).unsqueeze(0).to(device) - - Model.eval() - with torch.inference_mode(): - pred = Model(image_done) - pred_probs = torch.softmax(pred, dim=1) - pred_class = torch.argmax(pred_probs, dim=1) - - plt.imshow(image_done.squeeze().permute(1,2,0)) - title = f'Pred: {class_name[pred_class.cpu()]} | Probs: {pred_probs.max().cpu():.4f}' - plt.title(title) - plt.axis(False) - - return pred_probs - - -def plot_conf_mat(predictions: List[int], - num_classes: int, - classname, - dataset_imagefolder: datasets.ImageFolder, - task: str = 'multiclass'): - - confmat = ConfusionMatrix(num_classes=num_classes, - task=task) - - confmat_tensor = confmat(preds=predictions, - target=torch.tensor(dataset_imagefolder.targets)) - - fig, ax = plot_confusion_matrix( - conf_mat=confmat_tensor.numpy(), # plt like working with np - class_names=classname, - figsize=(10,7)) - - -def plot_patch_img(img: torch.Tensor, - img_size: int = 224, - patch_size: int = 16,): - - """this is for ViT demonstrate""" - - - # Setup hyperparameters and make sure img_size and patch_size are compatible - - num_patches = img_size/patch_size - assert img_size % patch_size == 0, "Image size must be divisible by patch size" - - print(f"Number of patches per row: {num_patches}\ - \nNumber of patches per column: {num_patches}\ - \nTotal patches: {num_patches*num_patches}\ - \nPatch size: {patch_size} pixels x {patch_size} pixels") - - image_permuted = img.permute(1, 2, 0) - # Create a series of subplots - fig, axs = plt.subplots(nrows=img_size // patch_size, # need int not float - ncols=img_size // patch_size, - figsize=(num_patches, num_patches), - sharex=True, - sharey=True) - - # Loop through height and width of image - for i, patch_height in enumerate(range(0, img_size, patch_size)): # iterate through height - for j, patch_width in enumerate(range(0, img_size, patch_size)): # iterate through width - - # Plot the permuted image patch (image_permuted -> (Height, Width, Color Channels)) - axs[i, j].imshow(image_permuted[patch_height:patch_height+patch_size, # iterate through height - patch_width:patch_width+patch_size, # iterate through width - :]) # get all color channels - - # Set up label information, remove the ticks for clarity and set labels to outside - axs[i, j].set_ylabel(i+1, - rotation="horizontal", - horizontalalignment="right", - verticalalignment="center" - ) - axs[i, j].set_xlabel(j+1) - axs[i, j].set_xticks([]) - axs[i, j].set_yticks([]) - axs[i, j].label_outer() - - plt.show() - - -def plot_5_feature_map(img_conv_out: torch.Tensor, - embedding_size: int = 768,): - """ - Plot random 5 convolutional feature maps, for ViT - """ - random_indexes = random.sample(range(0, embedding_size), k=5) # pick 5 numbers between 0 and the embedding size - print(f"Showing random convolutional feature maps from indexes: {random_indexes}") - - # Create plot - fig, axs = plt.subplots(nrows=1, ncols=5, figsize=(12, 12)) - - # Plot random image feature maps - for i, 
idx in enumerate(random_indexes): - img_feature_map = img_conv_out[:, idx, :, :] # index on the output tensor of the convolutional layer - axs[i].imshow(img_feature_map.squeeze().detach().numpy()) - axs[i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[]); - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— - - - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— -## Data_load related - -# custom ImageFolder -class RTG_RAM_DataSet(Dataset): - def __init__(self, - dir: str, - transform=None): - super().__init__() - - """ - this is a custom ImageFolder of pytorch - load your data into RAM in advance - can boost the training process - """ - - self.paths = list(Path(dir).glob("*/*.jpg")) # pathlib.Path - - self.transform = transform - - self.classes, self.class_idx = find_classes(dir) - - def load_image(self, index: int) -> Image.Image: - """Open an image via a path and return it""" - - image_path = self.paths[index] - return Image.open(image_path) - - # overwrite __len__() - def __len__(self) -> int: - """return the total num of samples.""" - return len(self.paths) - - # overwrite __getitem__() - def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]: - """return one sample of data, and label like (X, y).""" - img = self.load_image(index) - class_name = self.paths[index].parent.name - class_idx = self.class_idx[class_name] - - # transformation if necessary - if self.transform: - return self.transform(img), class_idx # return data+label (X,y) - else: - return img, class_idx - - -def create_dataloaders( - train_dir: str, - valid_dir: str, - transform: transforms.Compose, - batch_size: int, - test_transform: transforms.Compose = None, - num_workers: int = 0, - test_dir: str = None, - pin_mem: bool = True -): - - """Creates training and testing DataLoaders. - - Takes in a training directory and testing directory path and turns - them into PyTorch Datasets and then into PyTorch DataLoaders. - - Returns: - A tuple of (train_dataloader, test_dataloader, class_names). - Where class_names is a list of the target classes. 
- - """ - # Use ImageFolder to create dataset(s) - train_data = RTG_RAM_DataSet(train_dir, transform=transform) - valid_data = RTG_RAM_DataSet(valid_dir, transform=transform) - - if test_dir : - test_data = RTG_RAM_DataSet(test_dir, transform=test_transform) - - test_dataloader = DataLoader( - test_data, - batch_size=batch_size, - shuffle=False, - num_workers=0, - pin_memory=pin_mem,) - else: - pass - - # Get class names - class_names = train_data.classes - - # Turn images into data loaders - train_dataloader = DataLoader( - train_data, - batch_size=batch_size, - shuffle=True, - num_workers=num_workers, - pin_memory=pin_mem, - ) - - valid_dataloader = DataLoader( - valid_data, - batch_size=batch_size, - shuffle=True, - num_workers=num_workers, - pin_memory=pin_mem, - ) - - - if test_dir: - return train_dataloader, valid_dataloader, test_dataloader, class_names - else: - return train_dataloader, valid_dataloader, class_names - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— - - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— -## model related - -def print_train_time(start: float, - end: float, - device: torch.device = None): - """Prints and return time cost.""" - total_time = end - start - print(f"train time on {device}: {total_time:.3f} seconds") - return total_time - -def lr_scheduler_setting(optima: torch.optim.Optimizer, - linearLR_factor: float = 0.1, - expLR_gamma: float = 0.95, - constLR_factor: float = 0.1, - mileston1: int = 30, - mileston2: int = 60, - epochs: int = 100): - - - last = epochs-mileston2 - optima = optima - - - if mileston1 > mileston2 or mileston1 > epochs: - raise ValueError('mileston1 should smaller than epochs or mileston2') - if mileston2 < mileston1 or mileston2 > epochs: - raise ValueError('mileston2 should larger than mileston1 or smaller than epochs') - - scheduler1 = torch.optim.lr_scheduler.LinearLR(optima, start_factor=linearLR_factor) - scheduler2 = torch.optim.lr_scheduler.ExponentialLR(optima, gamma=expLR_gamma) # also need to tune gamma here - scheduler3 = torch.optim.lr_scheduler.ConstantLR(optima, factor=constLR_factor, total_iters=last) - scheduler = torch.optim.lr_scheduler.SequentialLR(optima, schedulers=[scheduler1, scheduler2, scheduler3], milestones=[mileston1, mileston2]) - - return scheduler - - -def general_train_setup(Model: nn.Module, - train_path: Path, - valid_path: Path, - test_path: Path, - transform: transforms, - test_transform: transforms, - batch_size: int = 8, - num_worker: int = 8, # cpu cores - init_lr: float = 0.01 - ): - - """ - quick setup for a training - - Returns: - a dict that contain dataloader, lr_scheduler(if needed), loss_fn, optimizing_func, classnames - """ - - train_dataloader, valid_dataloader, test_dataloader, class_name = create_dataloaders(train_dir=train_path, - valid_dir=valid_path, - test_dir=test_path, - test_transform=test_transform, - batch_size=batch_size, - num_workers=num_worker, - transform=transform, - pin_mem=True) - - - loss_fn = torch.nn.CrossEntropyLoss() - optima = torch.optim.AdamW(params=Model.parameters(), lr=init_lr, eps=1e-3) # 0.01 - - - if test_path: - return {'train_dataloader': train_dataloader, - 'valid_dataloader': valid_dataloader, - 'test_dataloader': test_dataloader, - 'class_name': class_name, - 'loss_fn': loss_fn, - 'optima': optima} - else: - return {'train_dataloader': train_dataloader, - 'valid_dataloader': valid_dataloader, - 'class_name': 
class_name, - 'loss_fn': loss_fn, - 'optima': optima} - - -def train_step(Model: torch.nn.Module, - data_loader: torch.utils.data.DataLoader, - loss_fn: torch.nn.Module, - optima: torch.optim.Optimizer, - #accuracy_fn, - device: torch.device = torch.device("cpu")): - """ - Performs a training with model trying to learn on data loader. - train a single step - """ - - train_loss, train_acc = 0, 0 - - Model.to(device) - # with torch.cuda.device(device=device): # this is useless - Model.train() - - for _, (X, y) in enumerate(data_loader): - # batch - X, y = X.to(device), y.to(device) - - y_pred_t = Model(X) - loss_t = loss_fn(y_pred_t, y) - loss_t.backward() - optima.step() # updata params per batch, not per epoch - - optima.zero_grad(set_to_none=True) - # for param in Model.parameters(): - # param.grad = None - - train_loss += loss_t.item() # .item() turn single tensor into a single scaler - y_pred_t_class = torch.argmax(y_pred_t, dim=1) - train_acc += torch.eq(y_pred_t_class, y).sum().item()/len(y_pred_t) * 100 - - - train_loss /= len(data_loader) - train_acc /= len(data_loader) - - # print(f"Train loss: {train_loss:.4f} | Train acc: {train_acc:.4f}%") - return train_acc, train_loss - - -def test_step(Model: torch.nn.Module, - data_loader: torch.utils.data.DataLoader, - loss_fn: torch.nn.Module, - #accuracy_fn, - device: torch.device = torch.device("cpu")): - '''test/valid a single step''' - - test_loss, test_acc = 0, 0 - - Model.to(device) - - Model.eval() - with torch.inference_mode(): - for X, y in data_loader: - - X, y = X.to(device), y.to(device) - - y_pred_e = Model(X) - test_loss += loss_fn(y_pred_e, y).item() - - y_pred_e_labels = y_pred_e.argmax(dim=1) - test_acc += torch.eq(y_pred_e_labels, y).sum().item()/len(y_pred_e) * 100 - - # test_acc += accuracy_fn(y_true=y, - # y_pred=y_pred_e.argmax(dim=1)) - - test_loss /= len(data_loader) - test_acc /= len(data_loader) - - # print(f"Test loss: {test_loss:.4F} | Test acc: {test_acc:.4F}%\n") - return test_acc, test_loss - - -def train_test_loop(Model: torch.nn.Module, - train_loader: torch.utils.data.DataLoader, - test_loader: torch.utils.data.DataLoader, - epochs: int, - optima: torch.optim.Optimizer, - scheduler: torch.optim.lr_scheduler = None, - #accuracy_fn, - loss_fn: torch.nn.Module = nn.CrossEntropyLoss(), - device: torch.device = torch.device("cpu")): - - if scheduler is not None: - results = {'train_loss': [], - 'train_acc': [], - 'test_loss': [], - 'test_acc': [], - 'learning rate': []} - else: - results = {'train_loss': [], - 'train_acc': [], - 'test_loss': [], - 'test_acc': [],} - - Model.to(device) - time_start = timer() - - for ep in tqdm(range(epochs)): - - train_acc, train_loss = train_step(Model=Model, - data_loader=train_loader, - loss_fn=loss_fn, - optima=optima, - device=device) - - test_acc, test_loss = test_step(Model=Model, - data_loader=test_loader, - loss_fn=loss_fn, - device=device) - - if scheduler is not None: - current_lr = optima.param_groups[0]['lr'] - results['learning rate'].append(current_lr) - scheduler.step() - - print(f"Epoch: {ep+1} | " - f"train_loss: {train_loss:.4f} | " - f"train_acc: {train_acc:.4f} | " - f"test_loss: {test_loss:.4f} | " - f"test_acc: {test_acc:.4f}" - ) - - results['train_loss'].append(train_loss) - results['train_acc'].append(train_acc) - results['test_loss'].append(test_loss) - results['test_acc'].append(test_acc) - - time_end = timer() - _ = print_train_time(start=time_start, - end=time_end, - device=device) - - return results - - -def train_test_loop_with_amp(Model: 
torch.nn.Module, - train_loader: torch.utils.data.DataLoader, - test_loader: torch.utils.data.DataLoader, - epochs: int, - optima: torch.optim.Optimizer, - scheduler: torch.optim.lr_scheduler = None, - loss_fn: torch.nn.Module = nn.CrossEntropyLoss(), - device: torch.device = torch.device("cpu")): - - """ - using AMP to training - """ - - if scheduler is not None: - results = {'train_loss': [], - 'train_acc': [], - 'test_loss': [], - 'test_acc': [], - 'learning rate': []} - else: - results = {'train_loss': [], - 'train_acc': [], - 'test_loss': [], - 'test_acc': [],} - - - # train_loss, train_acc = 0, 0 - - Model.to(device) - Model.train() - - scaler = torch.cuda.amp.GradScaler(enabled=True) - time_start = timer() - for ep in tqdm(range(epochs)): - - train_loss, train_acc = 0, 0 #?? maybe to avoid nan? - - for X, y in train_loader: - X, y = X.to(device), y.to(device) - - optima.zero_grad(set_to_none=True) - # for param in Model.parameters(): - # param.grad = None - - with torch.autocast(device_type=str(device), dtype=torch.float16): - - y_pred_t = Model(X) - loss_t = loss_fn(y_pred_t, y) - - # or maybe we should move this two line inside of AMP block? - train_loss += loss_t.item() # .item() turn single tensor into a single scaler - y_pred_t_class = torch.argmax(y_pred_t, dim=1) - train_acc += torch.eq(y_pred_t_class, y).sum().item()/len(y_pred_t) * 100 - - scaler.scale(loss_t).backward() # none type - - scaler.unscale_(optima) - - torch.nn.utils.clip_grad_norm_(Model.parameters(), max_norm=0.1) - - scaler.step(optima) - scaler.update() - - # loss_t.backward() - # optima.step() - - train_loss /= len(train_loader) - train_acc /= len(train_loader) - - if train_acc > 100: - train_acc = 100.0000 - - test_acc, test_loss = test_step(Model=Model, - data_loader=test_loader, - loss_fn=loss_fn, - device=device) - - if scheduler is not None: - optima.zero_grad(set_to_none=True) - optima.step() - current_lr = optima.param_groups[0]['lr'] - results['learning rate'].append(current_lr) - scheduler.step() - - print(f"Epoch: {ep+1} | " - f"train_loss: {train_loss:.4f} | " # nan??? - f"train_acc: {train_acc:.4f} | " - f"test_loss: {test_loss:.4f} | " # nan??? 
- f"test_acc: {test_acc:.4f}" - ) - - results['train_loss'].append(train_loss) - results['train_acc'].append(train_acc) - results['test_loss'].append(test_loss) - results['test_acc'].append(test_acc) - - # gc.collect() - # torch.cuda.empty_cache() - - time_end = timer() - print_train_time(start=time_start, - end=time_end, - device=device) - - return results - - - -def eval_model(Model: torch.nn.Module, - eval_loader: torch.utils.data.DataLoader, - loss_fn: torch.nn.Module = nn.CrossEntropyLoss(), - show: bool = True, - device: torch.device = torch.device("cpu")): - ''' - eval model prediction results, return loss, acc, pred_tensor - pred_tensor is for the plot of confusion matrix - ''' - loss = 0 - acc = 0 - preds = [] - - Model.to(device) - Model.eval() - with torch.inference_mode(): - for X, y in tqdm(eval_loader): - X, y = X.to(device), y.to(device) - - raw_logits = Model(X) - - loss += loss_fn(raw_logits, y).item() - pred_label = torch.argmax(raw_logits, dim=1) - - prediction = torch.argmax(raw_logits.squeeze(0), dim=1) # using this for confusion matrix - preds.append(prediction.cpu()) - - acc += torch.eq(pred_label, y).sum().item()/len(raw_logits) * 100 - - loss /= len(eval_loader) - acc /= len(eval_loader) - - predictions_tensor = torch.cat(preds) - - if show: - print(f"Model: {Model.__class__.__name__}") - print(f"Eval loss: {loss:.4F} | Eval acc: {acc:.4F}%\n") - return loss, acc, predictions_tensor - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— -## result saving -def save_model(model: torch.nn.Module, - target_dir: str, - model_name: str): - """Saves a PyTorch model to a target directory. - - Args: - model: A target PyTorch model to save. - target_dir: A directory for saving the model to. - model_name: A filename for the saved model. Should include - either ".pth" or ".pt" as the file extension. 
- - """ - # Create target directory - target_dir_path = Path(target_dir) - target_dir_path.mkdir(parents=True, - exist_ok=True) - - # Create model save path - assert model_name.endswith(".pth") or model_name.endswith(".pt"), "model_name should end with '.pt' or '.pth'" - model_save_path = target_dir_path / model_name - - # Save the whole model, not only the state_dict(), so that we don't have to init model structure instance everytime - print(f"[INFO] Saving model to: {model_save_path}") - torch.save(obj=model, # .state_dict(), - f=model_save_path) - - -def save_results(results: Dict[str, List[float]], - path_and_filename: str): - '''save Dict results into csv format''' - - print(f"[INFO] Saving results to: {path_and_filename}") - df = pd.DataFrame(results) - df.to_csv(path_and_filename, index=False) - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— - - -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— -## result analyze related - -def pred_wrong_and_store(path: Path, # class1..classn/img.jpg - Model, - transform, - class_names, - top_num: int = 5, - show: bool = True, - device: torch.device = torch.device('cpu')): - """ - preds some img on a model and store the results - and also grab and plot some most wrong examples - - Returns: - a sorted pandas dataframe - """ - - pred_list = [] - - # first, get a list contain every single img path - img_path_list = list(Path(path).glob("*/*.jpg")) - - - for path in tqdm(img_path_list): - - # a empty dict to store every img result - pred_dict = {} - - # get sample path - pred_dict['img_path'] = path - - # get class name - class_name = path.parent.stem - pred_dict["class_names"] = class_name - - start_time = timer() - - # get predictions - img = Image.open(path) - transformed_img = transform(img).unsqueeze(0).to(device) - - Model.to(device) - Model.eval() - with torch.inference_mode(): - pred_logits = Model(transformed_img) - pred_probs = torch.softmax(pred_logits, dim=1) - pred_label = torch.argmax(pred_probs, dim=1) - pred_class = class_names[pred_label.cpu()] - - pred_dict["pred_probs"] = pred_probs.unsqueeze(0).max().cpu().item() # make sure result back to cpu - pred_dict["pred_class"] = pred_class # convient for plot - - end_time = timer() - pred_dict["time_for_pred"] = round(end_time-start_time, 4) - - pred_dict['correct'] = class_name == pred_class - - pred_list.append(pred_dict) - - pred_df = pd.DataFrame(pred_list) - sorted_pred_df = pred_df.sort_values(by=['correct', 'pred_probs'], ascending=[True, False]) - - if show: - most_wrong = sorted_pred_df.head(n=top_num) - - for row in most_wrong.iterrows(): - data_row = row[1] - img_path = data_row[0] - true_label = data_row[1] - pred_prob = data_row[2] - pred_class = data_row[3] - - # plot img - img = torchvision.io.read_image(str(img_path)) # read to tensor - plt.figure() - plt.imshow(img.permute(1, 2, 0)) # h x w x c - plt.title(f"True: {true_label} | Pred: {pred_class} | Prob: {pred_prob}") - plt.axis(False); - else: - pass - - return sorted_pred_df - - -def check_model_size(path, show=True): - """check a model's size""" - - size = Path(path).stat().st_size // (1024*1024) - if show: - print(f"model size: {size:.3f} MB") - - return size - - -def general_test(Model, - model_path, - class_name, - manual_transforms, - test_path, loss_fn, - valid_loader): - - """ - run a general test on a model - including model_size, params, loss and acc on test set, pred_time and so on - 
- Returns: - a dict - """ - - stat = {} - print(f'[INFO] running general test on: {Model._get_name()}') - - model_size = check_model_size(model_path, show=False) - print('size check ... done') - model_params = sum(torch.numel(param) for param in Model.parameters()) - print('params check ... done') - loss, acc, _ = eval_model(Model, valid_loader, loss_fn, show=False) - print('valid evaluate ... done') - pred_df = pred_wrong_and_store(test_path, Model, manual_transforms, class_name, show=False) - print('prediction test ... done') - average_time_per_pred = round(pred_df.time_for_pred.mean(), 4) - print('predict time calculate ... done') - test_acc = pred_df.correct.value_counts()[0]*100/len(pred_df) - print('real accurate calculate ... done') - - stat['valid_loss'] = loss - stat['valid_acc'] = acc - stat['test_acc'] = test_acc - stat['number_of_parameters'] = model_params - stat['model_size (MB)'] = model_size - stat['time_per_pred_cpu'] = average_time_per_pred - - print("test results:") - print(stat) - - return stat -# ———————————————————————————————————————————————————————————————————————————————————————————————————————————— \ No newline at end of file diff --git a/spaces/RedBaron5/PatentSolver/README.md b/spaces/RedBaron5/PatentSolver/README.md deleted file mode 100644 index ceee5cf77c6480363b635c60d2a67f67613b854a..0000000000000000000000000000000000000000 --- a/spaces/RedBaron5/PatentSolver/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: PatentSolver -emoji: 🚀 -colorFrom: gray -colorTo: gray -sdk: streamlit -app_file: app.py -pinned: false -duplicated_from: xin/PatentSolver ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/apis/inference.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/apis/inference.py deleted file mode 100644 index 90bc1c0c68525734bd6793f07c15fe97d3c8342c..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmseg/apis/inference.py +++ /dev/null @@ -1,136 +0,0 @@ -import matplotlib.pyplot as plt -import annotator.uniformer.mmcv as mmcv -import torch -from annotator.uniformer.mmcv.parallel import collate, scatter -from annotator.uniformer.mmcv.runner import load_checkpoint - -from annotator.uniformer.mmseg.datasets.pipelines import Compose -from annotator.uniformer.mmseg.models import build_segmentor - - -def init_segmentor(config, checkpoint=None, device='cuda:0'): - """Initialize a segmentor from config file. - - Args: - config (str or :obj:`mmcv.Config`): Config file path or the config - object. - checkpoint (str, optional): Checkpoint path. If left as None, the model - will not load any weights. - device (str, optional) CPU/CUDA device option. Default 'cuda:0'. 
- Use 'cpu' for loading model on CPU. - Returns: - nn.Module: The constructed segmentor. - """ - if isinstance(config, str): - config = mmcv.Config.fromfile(config) - elif not isinstance(config, mmcv.Config): - raise TypeError('config must be a filename or Config object, ' - 'but got {}'.format(type(config))) - config.model.pretrained = None - config.model.train_cfg = None - model = build_segmentor(config.model, test_cfg=config.get('test_cfg')) - if checkpoint is not None: - checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') - model.CLASSES = checkpoint['meta']['CLASSES'] - model.PALETTE = checkpoint['meta']['PALETTE'] - model.cfg = config # save the config in the model for convenience - model.to(device) - model.eval() - return model - - -class LoadImage: - """A simple pipeline to load image.""" - - def __call__(self, results): - """Call function to load images into results. - - Args: - results (dict): A result dict contains the file name - of the image to be read. - - Returns: - dict: ``results`` will be returned containing loaded image. - """ - - if isinstance(results['img'], str): - results['filename'] = results['img'] - results['ori_filename'] = results['img'] - else: - results['filename'] = None - results['ori_filename'] = None - img = mmcv.imread(results['img']) - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - return results - - -def inference_segmentor(model, img): - """Inference image(s) with the segmentor. - - Args: - model (nn.Module): The loaded segmentor. - imgs (str/ndarray or list[str/ndarray]): Either image files or loaded - images. - - Returns: - (list[Tensor]): The segmentation result. - """ - cfg = model.cfg - device = next(model.parameters()).device # model device - # build the data pipeline - test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] - test_pipeline = Compose(test_pipeline) - # prepare data - data = dict(img=img) - data = test_pipeline(data) - data = collate([data], samples_per_gpu=1) - if next(model.parameters()).is_cuda: - # scatter to specified GPU - data = scatter(data, [device])[0] - else: - data['img_metas'] = [i.data[0] for i in data['img_metas']] - - # forward the model - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - return result - - -def show_result_pyplot(model, - img, - result, - palette=None, - fig_size=(15, 10), - opacity=0.5, - title='', - block=True): - """Visualize the segmentation results on the image. - - Args: - model (nn.Module): The loaded segmentor. - img (str or np.ndarray): Image filename or loaded image. - result (list): The segmentation result. - palette (list[list[int]]] | None): The palette of segmentation - map. If None is given, random palette will be generated. - Default: None - fig_size (tuple): Figure size of the pyplot figure. - opacity(float): Opacity of painted segmentation map. - Default 0.5. - Must be in (0, 1] range. - title (str): The title of pyplot figure. - Default is ''. - block (bool): Whether to block the pyplot figure. - Default is True. 
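For reference, the three helpers in this file are normally chained as sketched below; the config and checkpoint paths are placeholders, not files shipped with this repository. Note that in this variant of `show_result_pyplot` the pyplot calls are commented out and the rendered RGB overlay is returned rather than displayed.

```python
# Illustrative sketch only: typical use of init_segmentor / inference_segmentor /
# show_result_pyplot. Paths are hypothetical.
model = init_segmentor('configs/my_config.py', 'checkpoints/my_weights.pth', device='cuda:0')
result = inference_segmentor(model, 'demo/demo.png')
overlay = show_result_pyplot(model, 'demo/demo.png', result, opacity=0.5)
# `overlay` is an RGB numpy array that the caller can save or display.
```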
- """ - if hasattr(model, 'module'): - model = model.module - img = model.show_result( - img, result, palette=palette, show=False, opacity=opacity) - # plt.figure(figsize=fig_size) - # plt.imshow(mmcv.bgr2rgb(img)) - # plt.title(title) - # plt.tight_layout() - # plt.show(block=block) - return mmcv.bgr2rgb(img) diff --git a/spaces/Rominn/vits-uma-genshin-honkai/text/symbols.py b/spaces/Rominn/vits-uma-genshin-honkai/text/symbols.py deleted file mode 100644 index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000 --- a/spaces/Rominn/vits-uma-genshin-honkai/text/symbols.py +++ /dev/null @@ -1,39 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. -''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' - - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") \ No newline at end of file diff --git a/spaces/SeViLA/SeViLA/lavis/datasets/builders/image_text_pair_builder.py b/spaces/SeViLA/SeViLA/lavis/datasets/builders/image_text_pair_builder.py deleted file mode 100644 index 90e411eb6d41c23c15dbf5a0c67e2b68d467b43b..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/datasets/builders/image_text_pair_builder.py +++ /dev/null @@ -1,77 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import os -from lavis.common.registry import registry - -from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder -from lavis.datasets.datasets.image_text_pair_datasets import ImageTextPairDataset -from lavis.datasets.datasets.laion_dataset import LaionDataset - - -@registry.register_builder("conceptual_caption_3m") -class ConceptualCaption3MBuilder(BaseDatasetBuilder): - train_dataset_cls = ImageTextPairDataset - - DATASET_CONFIG_DICT = { - "default": "configs/datasets/conceptual_caption/defaults_3m.yaml" - } - - -@registry.register_builder("conceptual_caption_12m") -class ConceptualCaption12MBuilder(BaseDatasetBuilder): - train_dataset_cls = ImageTextPairDataset - - DATASET_CONFIG_DICT = { - "default": "configs/datasets/conceptual_caption/defaults_12m.yaml" - } - - -@registry.register_builder("sbu_caption") -class SBUCaptionBuilder(BaseDatasetBuilder): - train_dataset_cls = ImageTextPairDataset - - DATASET_CONFIG_DICT = {"default": "configs/datasets/sbu_caption/defaults.yaml"} - - -@registry.register_builder("vg_caption") -class VGCaptionBuilder(BaseDatasetBuilder): - train_dataset_cls = ImageTextPairDataset - - DATASET_CONFIG_DICT = {"default": "configs/datasets/vg/defaults_caption.yaml"} - - -@registry.register_builder("laion2B_multi") -class Laion2BMultiBuilder(BaseDatasetBuilder): - train_dataset_cls = LaionDataset - - DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults_2B_multi.yaml"} - - def _download_ann(self): - pass - - def _download_vis(self): - pass - - def build(self): - self.build_processors() - - build_info = self.config.build_info - - datasets = dict() - split = "train" # laion dataset only has train split - - # create datasets - # [NOTE] return inner_datasets (wds.DataPipeline) - dataset_cls = self.train_dataset_cls - datasets[split] = dataset_cls( - vis_processor=self.vis_processors[split], - text_processor=self.text_processors[split], - location=build_info.storage, - ).inner_dataset - - return datasets diff --git a/spaces/Sparkles-AI/design-look-a-likes/README.md b/spaces/Sparkles-AI/design-look-a-likes/README.md deleted file mode 100644 index 4e32a4efc3f4eb718c2adf9d79a952eadbf4acea..0000000000000000000000000000000000000000 --- a/spaces/Sparkles-AI/design-look-a-likes/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Design Look A Likes -emoji: 👁 -colorFrom: gray -colorTo: indigo -sdk: docker -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/shortcuts/auto_match.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/shortcuts/auto_match.py deleted file mode 100644 index 6c2b1ef70c9051304efa42ba7af348c7299e5534..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/shortcuts/auto_match.py +++ /dev/null @@ -1,104 +0,0 @@ -""" -Utilities function for keybinding with prompt toolkit. - -This will be bound to specific key press and filter modes, -like whether we are in edit mode, and whether the completer is open. 
-""" -import re -from prompt_toolkit.key_binding import KeyPressEvent - - -def parenthesis(event: KeyPressEvent): - """Auto-close parenthesis""" - event.current_buffer.insert_text("()") - event.current_buffer.cursor_left() - - -def brackets(event: KeyPressEvent): - """Auto-close brackets""" - event.current_buffer.insert_text("[]") - event.current_buffer.cursor_left() - - -def braces(event: KeyPressEvent): - """Auto-close braces""" - event.current_buffer.insert_text("{}") - event.current_buffer.cursor_left() - - -def double_quote(event: KeyPressEvent): - """Auto-close double quotes""" - event.current_buffer.insert_text('""') - event.current_buffer.cursor_left() - - -def single_quote(event: KeyPressEvent): - """Auto-close single quotes""" - event.current_buffer.insert_text("''") - event.current_buffer.cursor_left() - - -def docstring_double_quotes(event: KeyPressEvent): - """Auto-close docstring (double quotes)""" - event.current_buffer.insert_text('""""') - event.current_buffer.cursor_left(3) - - -def docstring_single_quotes(event: KeyPressEvent): - """Auto-close docstring (single quotes)""" - event.current_buffer.insert_text("''''") - event.current_buffer.cursor_left(3) - - -def raw_string_parenthesis(event: KeyPressEvent): - """Auto-close parenthesis in raw strings""" - matches = re.match( - r".*(r|R)[\"'](-*)", - event.current_buffer.document.current_line_before_cursor, - ) - dashes = matches.group(2) if matches else "" - event.current_buffer.insert_text("()" + dashes) - event.current_buffer.cursor_left(len(dashes) + 1) - - -def raw_string_bracket(event: KeyPressEvent): - """Auto-close bracker in raw strings""" - matches = re.match( - r".*(r|R)[\"'](-*)", - event.current_buffer.document.current_line_before_cursor, - ) - dashes = matches.group(2) if matches else "" - event.current_buffer.insert_text("[]" + dashes) - event.current_buffer.cursor_left(len(dashes) + 1) - - -def raw_string_braces(event: KeyPressEvent): - """Auto-close braces in raw strings""" - matches = re.match( - r".*(r|R)[\"'](-*)", - event.current_buffer.document.current_line_before_cursor, - ) - dashes = matches.group(2) if matches else "" - event.current_buffer.insert_text("{}" + dashes) - event.current_buffer.cursor_left(len(dashes) + 1) - - -def skip_over(event: KeyPressEvent): - """Skip over automatically added parenthesis/quote. 
- - (rather than adding another parenthesis/quote)""" - event.current_buffer.cursor_right() - - -def delete_pair(event: KeyPressEvent): - """Delete auto-closed parenthesis""" - event.current_buffer.delete() - event.current_buffer.delete_before_cursor() - - -auto_match_parens = {"(": parenthesis, "[": brackets, "{": braces} -auto_match_parens_raw_string = { - "(": raw_string_parenthesis, - "[": raw_string_bracket, - "{": raw_string_braces, -} diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/array/doc_list/io.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/array/doc_list/io.py deleted file mode 100644 index c2b531c25502840501dc1a773bb84eba153d9240..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/array/doc_list/io.py +++ /dev/null @@ -1,826 +0,0 @@ -import base64 -import csv -import io -import os -import pathlib -import pickle -from abc import abstractmethod -from contextlib import nullcontext -from io import StringIO, TextIOWrapper -from itertools import compress -from typing import ( - TYPE_CHECKING, - Any, - BinaryIO, - ContextManager, - Dict, - Generator, - Iterable, - Iterator, - List, - Optional, - Tuple, - Type, - TypeVar, - Union, -) - -import orjson - -from docarray.base_doc import AnyDoc, BaseDoc -from docarray.base_doc.io.json import orjson_dumps -from docarray.helper import ( - _access_path_dict_to_nested_dict, - _all_access_paths_valid, - _dict_to_access_paths, -) -from docarray.utils._internal.compress import _decompress_bytes, _get_compress_ctx -from docarray.utils._internal.misc import import_library - -if TYPE_CHECKING: - import pandas as pd - - from docarray import DocList - from docarray.proto import DocListProto - -T = TypeVar('T', bound='IOMixinArray') -T_doc = TypeVar('T_doc', bound=BaseDoc) - -ARRAY_PROTOCOLS = {'protobuf-array', 'pickle-array', 'json-array'} -SINGLE_PROTOCOLS = {'pickle', 'protobuf', 'json'} -ALLOWED_PROTOCOLS = ARRAY_PROTOCOLS.union(SINGLE_PROTOCOLS) -ALLOWED_COMPRESSIONS = {'lz4', 'bz2', 'lzma', 'zlib', 'gzip'} - - -def _protocol_and_compress_from_file_path( - file_path: Union[pathlib.Path, str], - default_protocol: Optional[str] = None, - default_compress: Optional[str] = None, -) -> Tuple[Optional[str], Optional[str]]: - """Extract protocol and compression algorithm from a string, use defaults if not found. - :param file_path: path of a file. - :param default_protocol: default serialization protocol used in case not found. - :param default_compress: default compression method used in case not found. 
- Examples: - >>> _protocol_and_compress_from_file_path('./docarray_fashion_mnist.protobuf.gzip') - ('protobuf', 'gzip') - >>> _protocol_and_compress_from_file_path('/Documents/docarray_fashion_mnist.protobuf') - ('protobuf', None) - >>> _protocol_and_compress_from_file_path('/Documents/docarray_fashion_mnist.gzip') - (None, gzip) - """ - - protocol = default_protocol - compress = default_compress - - file_extensions = [e.replace('.', '') for e in pathlib.Path(file_path).suffixes] - for extension in file_extensions: - if extension in ALLOWED_PROTOCOLS: - protocol = extension - elif extension in ALLOWED_COMPRESSIONS: - compress = extension - - return protocol, compress - - -class _LazyRequestReader: - def __init__(self, r): - self._data = r.iter_content(chunk_size=1024 * 1024) - self.content = b'' - - def __getitem__(self, item: slice): - while len(self.content) < item.stop: - try: - self.content += next(self._data) - except StopIteration: - return self.content[item.start : -1 : item.step] - return self.content[item] - - -class IOMixinArray(Iterable[T_doc]): - doc_type: Type[T_doc] - - @abstractmethod - def __len__(self): - ... - - @abstractmethod - def __init__( - self, - docs: Optional[Iterable[BaseDoc]] = None, - ): - ... - - @classmethod - def from_protobuf(cls: Type[T], pb_msg: 'DocListProto') -> T: - """create a Document from a protobuf message - :param pb_msg: The protobuf message from where to construct the DocList - """ - return cls(cls.doc_type.from_protobuf(doc_proto) for doc_proto in pb_msg.docs) - - def to_protobuf(self) -> 'DocListProto': - """Convert `DocList` into a Protobuf message""" - from docarray.proto import DocListProto - - da_proto = DocListProto() - for doc in self: - da_proto.docs.append(doc.to_protobuf()) - - return da_proto - - @classmethod - def from_bytes( - cls: Type[T], - data: bytes, - protocol: str = 'protobuf-array', - compress: Optional[str] = None, - show_progress: bool = False, - ) -> T: - """Deserialize bytes into a `DocList`. - - :param data: Bytes from which to deserialize - :param protocol: protocol that was used to serialize - :param compress: compression algorithm that was used to serialize between `lz4`, `bz2`, `lzma`, `zlib`, `gzip` - :param show_progress: show progress bar, only works when protocol is `pickle` or `protobuf` - :return: the deserialized `DocList` - """ - return cls._load_binary_all( - file_ctx=nullcontext(data), - protocol=protocol, - compress=compress, - show_progress=show_progress, - ) - - def _write_bytes( - self, - bf: BinaryIO, - protocol: str = 'protobuf-array', - compress: Optional[str] = None, - show_progress: bool = False, - ) -> None: - if protocol in ARRAY_PROTOCOLS: - compress_ctx = _get_compress_ctx(compress) - else: - # delegate the compression to per-doc compression - compress_ctx = None - - fc: ContextManager - if compress_ctx is None: - # if compress do not support streaming then postpone the compress - # into the for-loop - f, fc = bf, nullcontext() - else: - f = compress_ctx(bf) - fc = f - compress = None - - with fc: - if protocol == 'protobuf-array': - f.write(self.to_protobuf().SerializePartialToString()) - elif protocol == 'pickle-array': - f.write(pickle.dumps(self)) - elif protocol == 'json-array': - f.write(self.to_json()) - elif protocol in SINGLE_PROTOCOLS: - f.write( - b''.join( - self._to_binary_stream( - protocol=protocol, - compress=compress, - show_progress=show_progress, - ) - ) - ) - else: - raise ValueError( - f'protocol={protocol} is not supported. Can be only {ALLOWED_PROTOCOLS}.' 
- ) - - def _to_binary_stream( - self, - protocol: str = 'protobuf', - compress: Optional[str] = None, - show_progress: bool = False, - ) -> Iterator[bytes]: - from rich import filesize - - if show_progress: - from docarray.utils._internal.progress_bar import _get_progressbar - - pbar, t = _get_progressbar( - 'Serializing', disable=not show_progress, total=len(self) - ) - else: - from contextlib import nullcontext - - pbar = nullcontext() - - yield self._stream_header - - with pbar: - if show_progress: - _total_size = 0 - pbar.start_task(t) - for doc in self: - doc_bytes = doc.to_bytes(protocol=protocol, compress=compress) - len_doc_as_bytes = len(doc_bytes).to_bytes(4, 'big', signed=False) - all_bytes = len_doc_as_bytes + doc_bytes - - yield all_bytes - - if show_progress: - _total_size += len(all_bytes) - pbar.update( - t, - advance=1, - total_size=str(filesize.decimal(_total_size)), - ) - - def to_bytes( - self, - protocol: str = 'protobuf-array', - compress: Optional[str] = None, - file_ctx: Optional[BinaryIO] = None, - show_progress: bool = False, - ) -> Optional[bytes]: - """Serialize itself into `bytes`. - - For more Pythonic code, please use ``bytes(...)``. - - :param protocol: protocol to use. It can be 'pickle-array', 'protobuf-array', 'pickle' or 'protobuf' - :param compress: compress algorithm to use between : `lz4`, `bz2`, `lzma`, `zlib`, `gzip` - :param file_ctx: File or filename or serialized bytes where the data is stored. - :param show_progress: show progress bar, only works when protocol is `pickle` or `protobuf` - :return: the binary serialization in bytes or None if file_ctx is passed where to store - """ - - with file_ctx or io.BytesIO() as bf: - self._write_bytes( - bf=bf, - protocol=protocol, - compress=compress, - show_progress=show_progress, - ) - if isinstance(bf, io.BytesIO): - return bf.getvalue() - - return None - - @classmethod - def from_base64( - cls: Type[T], - data: str, - protocol: str = 'protobuf-array', - compress: Optional[str] = None, - show_progress: bool = False, - ) -> T: - """Deserialize base64 strings into a `DocList`. - - :param data: Base64 string to deserialize - :param protocol: protocol that was used to serialize - :param compress: compress algorithm that was used to serialize between `lz4`, `bz2`, `lzma`, `zlib`, `gzip` - :param show_progress: show progress bar, only works when protocol is `pickle` or `protobuf` - :return: the deserialized `DocList` - """ - return cls._load_binary_all( - file_ctx=nullcontext(base64.b64decode(data)), - protocol=protocol, - compress=compress, - show_progress=show_progress, - ) - - def to_base64( - self, - protocol: str = 'protobuf-array', - compress: Optional[str] = None, - show_progress: bool = False, - ) -> str: - """Serialize itself into base64 encoded string. - - :param protocol: protocol to use. It can be 'pickle-array', 'protobuf-array', 'pickle' or 'protobuf' - :param compress: compress algorithm to use between `lz4`, `bz2`, `lzma`, `zlib`, `gzip` - :param show_progress: show progress bar, only works when protocol is `pickle` or `protobuf` - :return: the binary serialization in bytes or None if file_ctx is passed where to store - """ - with io.BytesIO() as bf: - self._write_bytes( - bf=bf, - compress=compress, - protocol=protocol, - show_progress=show_progress, - ) - return base64.b64encode(bf.getvalue()).decode('utf-8') - - @classmethod - def from_json( - cls: Type[T], - file: Union[str, bytes, bytearray], - ) -> T: - """Deserialize JSON strings or bytes into a `DocList`. 
- - :param file: JSON object from where to deserialize a `DocList` - :return: the deserialized `DocList` - """ - json_docs = orjson.loads(file) - return cls([cls.doc_type(**v) for v in json_docs]) - - def to_json(self) -> bytes: - """Convert the object into JSON bytes. Can be loaded via `.from_json`. - :return: JSON serialization of `DocList` - """ - return orjson_dumps(self) - - @classmethod - def from_csv( - cls, - file_path: str, - encoding: str = 'utf-8', - dialect: Union[str, csv.Dialect] = 'excel', - ) -> 'DocList': - """ - Load a DocList from a csv file following the schema defined in the - [`.doc_type`][docarray.DocList] attribute. - Every row of the csv file will be mapped to one document in the doc_list. - The column names (defined in the first row) have to match the field names - of the Document type. - For nested fields use "__"-separated access paths, such as `'image__url'`. - - List-like fields (including field of type DocList) are not supported. - - :param file_path: path to csv file to load DocList from. - :param encoding: encoding used to read the csv file. Defaults to 'utf-8'. - :param dialect: defines separator and how to handle whitespaces etc. - Can be a [`csv.Dialect`](https://docs.python.org/3/library/csv.html#csv.Dialect) - instance or one string of: - `'excel'` (for comma separated values), - `'excel-tab'` (for tab separated values), - `'unix'` (for csv file generated on UNIX systems). - - :return: `DocList` object - """ - if cls.doc_type == AnyDoc: - raise TypeError( - 'There is no document schema defined. ' - 'Please specify the DocList\'s Document type using `DocList[MyDoc]`.' - ) - - if file_path.startswith('http'): - import urllib.request - - with urllib.request.urlopen(file_path) as f: - file = StringIO(f.read().decode(encoding)) - return cls._from_csv_file(file, dialect) - else: - with open(file_path, 'r', encoding=encoding) as fp: - return cls._from_csv_file(fp, dialect) - - @classmethod - def _from_csv_file( - cls, file: Union[StringIO, TextIOWrapper], dialect: Union[str, csv.Dialect] - ) -> 'DocList': - from docarray import DocList - - rows = csv.DictReader(file, dialect=dialect) - - doc_type = cls.doc_type - docs = DocList.__class_getitem__(doc_type)() - - field_names: List[str] = ( - [] if rows.fieldnames is None else [str(f) for f in rows.fieldnames] - ) - if field_names is None or len(field_names) == 0: - raise TypeError("No field names are given.") - - valid_paths = _all_access_paths_valid( - doc_type=doc_type, access_paths=field_names - ) - if not all(valid_paths): - raise ValueError( - f'Column names do not match the schema of the DocList\'s ' - f'document type ({cls.doc_type.__name__}): ' - f'{list(compress(field_names, [not v for v in valid_paths]))}' - ) - - for access_path2val in rows: - doc_dict: Dict[Any, Any] = _access_path_dict_to_nested_dict(access_path2val) - docs.append(doc_type.parse_obj(doc_dict)) - - return docs - - def to_csv( - self, file_path: str, dialect: Union[str, csv.Dialect] = 'excel' - ) -> None: - """ - Save a `DocList` to a csv file. - The field names will be stored in the first row. Each row corresponds to the - information of one Document. - Columns for nested fields will be named after the "__"-seperated access paths, - such as `'image__url'` for `image.url`. - - :param file_path: path to a csv file. - :param dialect: defines separator and how to handle whitespaces etc. 
- Can be a [`csv.Dialect`](https://docs.python.org/3/library/csv.html#csv.Dialect) - instance or one string of: - `'excel'` (for comma separated values), - `'excel-tab'` (for tab separated values), - `'unix'` (for csv file generated on UNIX systems). - - """ - if self.doc_type == AnyDoc: - raise TypeError( - 'DocList must be homogeneous to be converted to a csv.' - 'There is no document schema defined. ' - 'Please specify the DocList\'s Document type using `DocList[MyDoc]`.' - ) - fields = self.doc_type._get_access_paths() - - with open(file_path, 'w') as csv_file: - writer = csv.DictWriter(csv_file, fieldnames=fields, dialect=dialect) - writer.writeheader() - - for doc in self: - doc_dict = _dict_to_access_paths(doc.dict()) - writer.writerow(doc_dict) - - @classmethod - def from_dataframe(cls, df: 'pd.DataFrame') -> 'DocList': - """ - Load a `DocList` from a `pandas.DataFrame` following the schema - defined in the [`.doc_type`][docarray.DocList] attribute. - Every row of the dataframe will be mapped to one Document in the doc_list. - The column names of the dataframe have to match the field names of the - Document type. - For nested fields use "__"-separated access paths as column names, - such as `'image__url'`. - - List-like fields (including field of type DocList) are not supported. - - --- - - ```python - import pandas as pd - - from docarray import BaseDoc, DocList - - - class Person(BaseDoc): - name: str - follower: int - - - df = pd.DataFrame( - data=[['Maria', 12345], ['Jake', 54321]], columns=['name', 'follower'] - ) - - docs = DocList[Person].from_dataframe(df) - - assert docs.name == ['Maria', 'Jake'] - assert docs.follower == [12345, 54321] - ``` - - --- - - :param df: `pandas.DataFrame` to extract Document's information from - :return: `DocList` where each Document contains the information of one - corresponding row of the `pandas.DataFrame`. - """ - from docarray import DocList - - if cls.doc_type == AnyDoc: - raise TypeError( - 'There is no document schema defined. ' - 'Please specify the DocList\'s Document type using `DocList[MyDoc]`.' - ) - - doc_type = cls.doc_type - docs = DocList.__class_getitem__(doc_type)() - field_names = df.columns.tolist() - - if field_names is None or len(field_names) == 0: - raise TypeError("No field names are given.") - - valid_paths = _all_access_paths_valid( - doc_type=doc_type, access_paths=field_names - ) - if not all(valid_paths): - raise ValueError( - f'Column names do not match the schema of the DocList\'s ' - f'document type ({cls.doc_type.__name__}): ' - f'{list(compress(field_names, [not v for v in valid_paths]))}' - ) - - for row in df.itertuples(): - access_path2val = row._asdict() - access_path2val.pop('index', None) - doc_dict = _access_path_dict_to_nested_dict(access_path2val) - docs.append(doc_type.parse_obj(doc_dict)) - - return docs - - def to_dataframe(self) -> 'pd.DataFrame': - """ - Save a DocList to a `pandas.DataFrame`. - The field names will be stored as column names. Each row of the dataframe corresponds - to the information of one Document. - Columns for nested fields will be named after the "__"-seperated access paths, - such as `'image__url'` for `image.url`. - - :return: `pandas.DataFrame` - """ - if TYPE_CHECKING: - import pandas as pd - else: - pd = import_library('pandas', raise_error=True) - - if self.doc_type == AnyDoc: - raise TypeError( - 'DocList must be homogeneous to be converted to a DataFrame.' - 'There is no document schema defined. 
' - 'Please specify the DocList\'s Document type using `DocList[MyDoc]`.' - ) - - fields = self.doc_type._get_access_paths() - df = pd.DataFrame(columns=fields) - - for doc in self: - doc_dict = _dict_to_access_paths(doc.dict()) - doc_dict = {k: [v] for k, v in doc_dict.items()} - df = pd.concat([df, pd.DataFrame.from_dict(doc_dict)], ignore_index=True) - - return df - - # Methods to load from/to files in different formats - @property - def _stream_header(self) -> bytes: - # Binary format for streaming case - - # V2 DocList streaming serialization format - # | 1 byte | 8 bytes | 4 bytes | variable(docarray v2) | 4 bytes | variable(docarray v2) ... - - # 1 byte (uint8) - version_byte = b'\x02' - # 8 bytes (uint64) - num_docs_as_bytes = len(self).to_bytes(8, 'big', signed=False) - return version_byte + num_docs_as_bytes - - @classmethod - def _load_binary_all( - cls: Type[T], - file_ctx: Union[ContextManager[io.BufferedReader], ContextManager[bytes]], - protocol: Optional[str], - compress: Optional[str], - show_progress: bool, - ): - """Read a `DocList` object from a binary file - :param protocol: protocol to use. It can be 'pickle-array', 'protobuf-array', 'pickle' or 'protobuf' - :param compress: compress algorithm to use between `lz4`, `bz2`, `lzma`, `zlib`, `gzip` - :param show_progress: show progress bar, only works when protocol is `pickle` or `protobuf` - :return: a `DocList` - """ - with file_ctx as fp: - if isinstance(fp, bytes): - d = fp - else: - d = fp.read() - - if protocol is not None and protocol in ( - 'pickle-array', - 'protobuf-array', - 'json-array', - ): - if _get_compress_ctx(algorithm=compress) is not None: - d = _decompress_bytes(d, algorithm=compress) - compress = None - - if protocol is not None and protocol == 'protobuf-array': - from docarray.proto import DocListProto - - dap = DocListProto() - dap.ParseFromString(d) - - return cls.from_protobuf(dap) - elif protocol is not None and protocol == 'pickle-array': - return pickle.loads(d) - - elif protocol is not None and protocol == 'json-array': - return cls.from_json(d) - - # Binary format for streaming case - else: - from rich import filesize - - from docarray.utils._internal.progress_bar import _get_progressbar - - # 1 byte (uint8) - version_num = int.from_bytes(d[0:1], 'big', signed=False) - if version_num != 2: - raise ValueError( - f'Unsupported version number {version_num} in binary format, expected 2' - ) - - # 8 bytes (uint64) - num_docs = int.from_bytes(d[1:9], 'big', signed=False) - - pbar, t = _get_progressbar( - 'Deserializing', disable=not show_progress, total=num_docs - ) - - # this 9 is version + num_docs bytes used - start_pos = 9 - docs = [] - with pbar: - _total_size = 0 - pbar.start_task(t) - - for _ in range(num_docs): - # 4 bytes (uint32) - len_current_doc_in_bytes = int.from_bytes( - d[start_pos : start_pos + 4], 'big', signed=False - ) - start_doc_pos = start_pos + 4 - end_doc_pos = start_doc_pos + len_current_doc_in_bytes - start_pos = end_doc_pos - - # variable length bytes doc - load_protocol: str = protocol or 'protobuf' - doc = cls.doc_type.from_bytes( - d[start_doc_pos:end_doc_pos], - protocol=load_protocol, - compress=compress, - ) - docs.append(doc) - _total_size += len_current_doc_in_bytes - pbar.update( - t, advance=1, total_size=str(filesize.decimal(_total_size)) - ) - return cls(docs) - - @classmethod - def _load_binary_stream( - cls: Type[T], - file_ctx: ContextManager[io.BufferedReader], - protocol: str = 'protobuf', - compress: Optional[str] = None, - show_progress: bool = False, 
- ) -> Generator['T_doc', None, None]: - """Yield `Document` objects from a binary file - - :param protocol: protocol to use. It can be 'pickle' or 'protobuf' - :param compress: compress algorithm to use between `lz4`, `bz2`, `lzma`, `zlib`, `gzip` - :param show_progress: show progress bar, only works when protocol is `pickle` or `protobuf` - :return: a generator of `Document` objects - """ - - from rich import filesize - - with file_ctx as f: - version_numdocs_lendoc0 = f.read(9) - # 1 byte (uint8) - version_num = int.from_bytes( - version_numdocs_lendoc0[0:1], 'big', signed=False - ) - if version_num != 2: - raise ValueError( - f'Unsupported version number {version_num} in binary format, expected 2' - ) - - # 8 bytes (uint64) - num_docs = int.from_bytes(version_numdocs_lendoc0[1:9], 'big', signed=False) - - if show_progress: - from docarray.utils._internal.progress_bar import _get_progressbar - - pbar, t = _get_progressbar( - 'Deserializing', disable=not show_progress, total=num_docs - ) - else: - from contextlib import nullcontext - - pbar = nullcontext() - - with pbar: - if show_progress: - _total_size = 0 - pbar.start_task(t) - for _ in range(num_docs): - # 4 bytes (uint32) - len_current_doc_in_bytes = int.from_bytes( - f.read(4), 'big', signed=False - ) - load_protocol: str = protocol - yield cls.doc_type.from_bytes( - f.read(len_current_doc_in_bytes), - protocol=load_protocol, - compress=compress, - ) - if show_progress: - _total_size += len_current_doc_in_bytes - pbar.update( - t, advance=1, total_size=str(filesize.decimal(_total_size)) - ) - - @classmethod - def load_binary( - cls: Type[T], - file: Union[str, bytes, pathlib.Path, io.BufferedReader, _LazyRequestReader], - protocol: str = 'protobuf-array', - compress: Optional[str] = None, - show_progress: bool = False, - streaming: bool = False, - ) -> Union[T, Generator['T_doc', None, None]]: - """Load doc_list elements from a compressed binary file. - - In case protocol is pickle the `Documents` are streamed from disk to save memory usage - - !!! note - If `file` is `str` it can specify `protocol` and `compress` as file extensions. - This functionality assumes `file=file_name.$protocol.$compress` where `$protocol` and `$compress` refer to a - string interpolation of the respective `protocol` and `compress` methods. - For example if `file=my_docarray.protobuf.lz4` then the binary data will be loaded assuming `protocol=protobuf` - and `compress=lz4`. - - :param file: File or filename or serialized bytes where the data is stored. - :param protocol: protocol to use. It can be 'pickle-array', 'protobuf-array', 'pickle' or 'protobuf' - :param compress: compress algorithm to use between `lz4`, `bz2`, `lzma`, `zlib`, `gzip` - :param show_progress: show progress bar, only works when protocol is `pickle` or `protobuf` - :param streaming: if `True` returns a generator over `Document` objects. 
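As a usage sketch of the binary helpers above (and of `save_binary`, defined just below), a `DocList` can be round-tripped either through raw bytes or through a file whose extensions encode the protocol and compression. `MyDoc` is a placeholder schema, and the `.lz4` step assumes the lz4 codec is installed.

```python
# Illustrative sketch only: round-tripping a DocList through the binary helpers.
from docarray import BaseDoc, DocList


class MyDoc(BaseDoc):
    text: str


docs = DocList[MyDoc]([MyDoc(text='hello'), MyDoc(text='world')])

# in-memory round trip with the default array protocol
data = docs.to_bytes(protocol='protobuf-array')
restored = DocList[MyDoc].from_bytes(data, protocol='protobuf-array')

# file-based round trip; '.protobuf.lz4' lets load_binary infer protocol and compression
docs.save_binary('docs.protobuf.lz4')
reloaded = DocList[MyDoc].load_binary('docs.protobuf.lz4')
```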
- - :return: a `DocList` object - - """ - load_protocol: Optional[str] = protocol - load_compress: Optional[str] = compress - file_ctx: Union[nullcontext, io.BufferedReader] - if isinstance(file, (io.BufferedReader, _LazyRequestReader, bytes)): - file_ctx = nullcontext(file) - # by checking path existence we allow file to be of type Path, LocalPath, PurePath and str - elif isinstance(file, (str, pathlib.Path)) and os.path.exists(file): - load_protocol, load_compress = _protocol_and_compress_from_file_path( - file, protocol, compress - ) - file_ctx = open(file, 'rb') - else: - raise FileNotFoundError(f'cannot find file {file}') - if streaming: - if load_protocol not in SINGLE_PROTOCOLS: - raise ValueError( - f'`streaming` is only available when using {" or ".join(map(lambda x: f"`{x}`", SINGLE_PROTOCOLS))} as protocol, ' - f'got {load_protocol}' - ) - else: - return cls._load_binary_stream( - file_ctx, - protocol=load_protocol, - compress=load_compress, - show_progress=show_progress, - ) - else: - return cls._load_binary_all( - file_ctx, load_protocol, load_compress, show_progress - ) - - def save_binary( - self, - file: Union[str, pathlib.Path], - protocol: str = 'protobuf-array', - compress: Optional[str] = None, - show_progress: bool = False, - ) -> None: - """Save DocList into a binary file. - - It will use the protocol to pick how to save the DocList. - If used `picke-doc_list` and `protobuf-array` the DocList will be stored - and compressed at complete level using `pickle` or `protobuf`. - When using `protobuf` or `pickle` as protocol each Document in DocList - will be stored individually and this would make it available for streaming. - - !!! note - If `file` is `str` it can specify `protocol` and `compress` as file extensions. - This functionality assumes `file=file_name.$protocol.$compress` where `$protocol` and `$compress` refer to a - string interpolation of the respective `protocol` and `compress` methods. - For example if `file=my_docarray.protobuf.lz4` then the binary data will be created using `protocol=protobuf` - and `compress=lz4`. - - :param file: File or filename to which the data is saved. - :param protocol: protocol to use. 
It can be 'pickle-array', 'protobuf-array', 'pickle' or 'protobuf' - :param compress: compress algorithm to use between `lz4`, `bz2`, `lzma`, `zlib`, `gzip` - :param show_progress: show progress bar, only works when protocol is `pickle` or `protobuf` - """ - if isinstance(file, io.BufferedWriter): - file_ctx = nullcontext(file) - else: - _protocol, _compress = _protocol_and_compress_from_file_path(file) - - if _protocol is not None: - protocol = _protocol - if _compress is not None: - compress = _compress - - file_ctx = open(file, 'wb') - - self.to_bytes( - protocol=protocol, - compress=compress, - file_ctx=file_ctx, - show_progress=show_progress, - ) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py deleted file mode 100644 index 336c7b254fe392b4703039fec86a83acdbd2e1a5..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py +++ /dev/null @@ -1,35 +0,0 @@ -_base_ = './cityscapes.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (769, 769) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 1025), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/Surn/UnlimitedMusicGen/audiocraft/quantization/core_vq.py b/spaces/Surn/UnlimitedMusicGen/audiocraft/quantization/core_vq.py deleted file mode 100644 index e1896bb1788a945a1f7be6369abb255ecf72c7a0..0000000000000000000000000000000000000000 --- a/spaces/Surn/UnlimitedMusicGen/audiocraft/quantization/core_vq.py +++ /dev/null @@ -1,400 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import typing as tp - -from einops import rearrange, repeat -import flashy -import torch -from torch import nn, einsum -import torch.nn.functional as F - - -def exists(val: tp.Optional[tp.Any]) -> bool: - return val is not None - - -def default(val: tp.Any, d: tp.Any) -> tp.Any: - return val if exists(val) else d - - -def l2norm(t): - return F.normalize(t, p=2, dim=-1) - - -def ema_inplace(moving_avg, new, decay: float): - moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) - - -def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5): - return (x + epsilon) / (x.sum() + n_categories * epsilon) - - -def uniform_init(*shape: int): - t = torch.empty(shape) - nn.init.kaiming_uniform_(t) - return t - - -def sample_vectors(samples, num: int): - num_samples, device = samples.shape[0], samples.device - - if num_samples >= num: - indices = torch.randperm(num_samples, device=device)[:num] - else: - indices = torch.randint(0, num_samples, (num,), device=device) - - return samples[indices] - - -def kmeans(samples, num_clusters: int, num_iters: int = 10): - dim, dtype = samples.shape[-1], samples.dtype - - means = sample_vectors(samples, num_clusters) - - for _ in range(num_iters): - diffs = rearrange(samples, "n d -> n () d") - rearrange( - means, "c d -> () c d" - ) - dists = -(diffs ** 2).sum(dim=-1) - - buckets = dists.max(dim=-1).indices - bins = torch.bincount(buckets, minlength=num_clusters) - zero_mask = bins == 0 - bins_min_clamped = bins.masked_fill(zero_mask, 1) - - new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype) - new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples) - new_means = new_means / bins_min_clamped[..., None] - - means = torch.where(zero_mask[..., None], means, new_means) - - return means, bins - - -def orthgonal_loss_fn(t): - # eq (2) from https://arxiv.org/abs/2112.00384 - n = t.shape[0] - normed_codes = l2norm(t) - identity = torch.eye(n, device=t.device) - cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes) - return ((cosine_sim - identity) ** 2).sum() / (n ** 2) - - -class EuclideanCodebook(nn.Module): - """Codebook with Euclidean distance. - - Args: - dim (int): Dimension. - codebook_size (int): Codebook size. - kmeans_init (bool): Whether to use k-means to initialize the codebooks. - If set to true, run the k-means algorithm on the first training batch and use - the learned centroids as initialization. - kmeans_iters (int): Number of iterations used for k-means algorithm at initialization. - decay (float): Decay for exponential moving average over the codebooks. - epsilon (float): Epsilon value for numerical stability. - threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes - that have an exponential moving average cluster size less than the specified threshold with - randomly selected vector from the current batch. 
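The exponential-moving-average bookkeeping that this docstring describes amounts to three running updates: per-code usage counts, per-code sums of assigned vectors, and a re-normalised codebook. A self-contained sketch of the same arithmetic on toy tensors, separate from the class itself:

```python
# Illustrative sketch only: the EMA codebook update used by EuclideanCodebook,
# written out on toy tensors (8 codes of dimension 4, a batch of 16 vectors).
import torch
import torch.nn.functional as F

decay, eps, n_codes = 0.8, 1e-5, 8
x = torch.randn(16, 4)               # flattened batch of input vectors
embed = torch.randn(n_codes, 4)      # codebook
cluster_size = torch.zeros(n_codes)
embed_avg = embed.clone()

# nearest-code assignment (argmin of Euclidean distance)
onehot = F.one_hot(torch.cdist(x, embed).argmin(dim=-1), n_codes).float()

# EMA of per-code usage counts and per-code sums of assigned vectors
cluster_size = decay * cluster_size + (1 - decay) * onehot.sum(0)
embed_avg = decay * embed_avg + (1 - decay) * (onehot.t() @ x)

# Laplace smoothing avoids dividing by (near-)zero counts for unused codes
n = cluster_size.sum()
smoothed = (cluster_size + eps) / (n + n_codes * eps) * n
embed = embed_avg / smoothed.unsqueeze(1)
```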
- """ - def __init__( - self, - dim: int, - codebook_size: int, - kmeans_init: int = False, - kmeans_iters: int = 10, - decay: float = 0.8, - epsilon: float = 1e-5, - threshold_ema_dead_code: int = 2, - ): - super().__init__() - self.decay = decay - init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros - embed = init_fn(codebook_size, dim) - - self.codebook_size = codebook_size - - self.kmeans_iters = kmeans_iters - self.epsilon = epsilon - self.threshold_ema_dead_code = threshold_ema_dead_code - - self.register_buffer("inited", torch.Tensor([not kmeans_init])) - self.register_buffer("cluster_size", torch.zeros(codebook_size)) - self.register_buffer("embed", embed) - self.register_buffer("embed_avg", embed.clone()) - - @torch.jit.ignore - def init_embed_(self, data): - if self.inited: - return - - embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters) - self.embed.data.copy_(embed) - self.embed_avg.data.copy_(embed.clone()) - self.cluster_size.data.copy_(cluster_size) - self.inited.data.copy_(torch.Tensor([True])) - # Make sure all buffers across workers are in sync after initialization - flashy.distrib.broadcast_tensors(self.buffers()) - - def replace_(self, samples, mask): - modified_codebook = torch.where( - mask[..., None], sample_vectors(samples, self.codebook_size), self.embed - ) - self.embed.data.copy_(modified_codebook) - - def expire_codes_(self, batch_samples): - if self.threshold_ema_dead_code == 0: - return - - expired_codes = self.cluster_size < self.threshold_ema_dead_code - if not torch.any(expired_codes): - return - - batch_samples = rearrange(batch_samples, "... d -> (...) d") - self.replace_(batch_samples, mask=expired_codes) - flashy.distrib.broadcast_tensors(self.buffers()) - - def preprocess(self, x): - x = rearrange(x, "... d -> (...) d") - return x - - def quantize(self, x): - embed = self.embed.t() - dist = -( - x.pow(2).sum(1, keepdim=True) - - 2 * x @ embed - + embed.pow(2).sum(0, keepdim=True) - ) - embed_ind = dist.max(dim=-1).indices - return embed_ind - - def postprocess_emb(self, embed_ind, shape): - return embed_ind.view(*shape[:-1]) - - def dequantize(self, embed_ind): - quantize = F.embedding(embed_ind, self.embed) - return quantize - - def encode(self, x): - shape = x.shape - # pre-process - x = self.preprocess(x) - # quantize - embed_ind = self.quantize(x) - # post-process - embed_ind = self.postprocess_emb(embed_ind, shape) - return embed_ind - - def decode(self, embed_ind): - quantize = self.dequantize(embed_ind) - return quantize - - def forward(self, x): - shape, dtype = x.shape, x.dtype - x = self.preprocess(x) - self.init_embed_(x) - - embed_ind = self.quantize(x) - embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype) - embed_ind = self.postprocess_emb(embed_ind, shape) - quantize = self.dequantize(embed_ind) - - if self.training: - # We do the expiry of code at that point as buffers are in sync - # and all the workers will take the same decision. 
- self.expire_codes_(x) - ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay) - embed_sum = x.t() @ embed_onehot - ema_inplace(self.embed_avg, embed_sum.t(), self.decay) - cluster_size = ( - laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon) - * self.cluster_size.sum() - ) - embed_normalized = self.embed_avg / cluster_size.unsqueeze(1) - self.embed.data.copy_(embed_normalized) - - return quantize, embed_ind - - -class VectorQuantization(nn.Module): - """Vector quantization implementation. - Currently supports only euclidean distance. - - Args: - dim (int): Dimension - codebook_size (int): Codebook size - codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim. - decay (float): Decay for exponential moving average over the codebooks. - epsilon (float): Epsilon value for numerical stability. - kmeans_init (bool): Whether to use kmeans to initialize the codebooks. - kmeans_iters (int): Number of iterations used for kmeans initialization. - threshold_ema_dead_code (int): - channels_last (bool): Channels are the last dimension in the input tensors. - commitment_weight (float): Weight for commitment loss. - orthogonal_reg_weight (float): Orthogonal regularization weights. - orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes. - orthogonal_reg_max_codes (optional int): Maximum number of codes to consider - for orthogonal regulariation. - threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes - that have an exponential moving average cluster size less than the specified threshold with - randomly selected vector from the current batch. - """ - def __init__( - self, - dim: int, - codebook_size: int, - codebook_dim: tp.Optional[int] = None, - decay: float = 0.8, - epsilon: float = 1e-5, - kmeans_init: bool = False, - kmeans_iters: int = 10, - threshold_ema_dead_code: int = 2, - channels_last: bool = False, - commitment_weight: float = 1., - orthogonal_reg_weight: float = 0.0, - orthogonal_reg_active_codes_only: bool = False, - orthogonal_reg_max_codes: tp.Optional[int] = None, - ): - super().__init__() - _codebook_dim: int = default(codebook_dim, dim) - - requires_projection = _codebook_dim != dim - self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity()) - self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity()) - - self.epsilon = epsilon - self.commitment_weight = commitment_weight - - self.orthogonal_reg_weight = orthogonal_reg_weight - self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only - self.orthogonal_reg_max_codes = orthogonal_reg_max_codes - - self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size, - kmeans_init=kmeans_init, kmeans_iters=kmeans_iters, - decay=decay, epsilon=epsilon, - threshold_ema_dead_code=threshold_ema_dead_code) - self.codebook_size = codebook_size - - self.channels_last = channels_last - - @property - def codebook(self): - return self._codebook.embed - - @property - def inited(self): - return self._codebook.inited - - def _preprocess(self, x): - if not self.channels_last: - x = rearrange(x, "b d n -> b n d") - return x - - def _postprocess(self, quantize): - if not self.channels_last: - quantize = rearrange(quantize, "b n d -> b d n") - return quantize - - def encode(self, x): - x = self._preprocess(x) - x = self.project_in(x) - embed_in = self._codebook.encode(x) - return embed_in - - def decode(self, embed_ind): - 
quantize = self._codebook.decode(embed_ind) - quantize = self.project_out(quantize) - quantize = self._postprocess(quantize) - return quantize - - def forward(self, x): - device = x.device - x = self._preprocess(x) - - x = self.project_in(x) - quantize, embed_ind = self._codebook(x) - - if self.training: - quantize = x + (quantize - x).detach() - - loss = torch.tensor([0.0], device=device, requires_grad=self.training) - - if self.training: - if self.commitment_weight > 0: - commit_loss = F.mse_loss(quantize.detach(), x) - loss = loss + commit_loss * self.commitment_weight - - if self.orthogonal_reg_weight > 0: - codebook = self.codebook - - if self.orthogonal_reg_active_codes_only: - # only calculate orthogonal loss for the activated codes for this batch - unique_code_ids = torch.unique(embed_ind) - codebook = codebook[unique_code_ids] - - num_codes = codebook.shape[0] - if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes: - rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes] - codebook = codebook[rand_ids] - - orthogonal_reg_loss = orthgonal_loss_fn(codebook) - loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight - - quantize = self.project_out(quantize) - quantize = self._postprocess(quantize) - - return quantize, embed_ind, loss - - -class ResidualVectorQuantization(nn.Module): - """Residual vector quantization implementation. - - Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf - """ - def __init__(self, *, num_quantizers, **kwargs): - super().__init__() - self.layers = nn.ModuleList( - [VectorQuantization(**kwargs) for _ in range(num_quantizers)] - ) - - def forward(self, x, n_q: tp.Optional[int] = None): - quantized_out = 0.0 - residual = x - - all_losses = [] - all_indices = [] - - n_q = n_q or len(self.layers) - - for i, layer in enumerate(self.layers[:n_q]): - quantized, indices, loss = layer(residual) - residual = residual - quantized - quantized_out = quantized_out + quantized - all_indices.append(indices) - all_losses.append(loss) - - out_losses, out_indices = map(torch.stack, (all_losses, all_indices)) - return quantized_out, out_indices, out_losses - - def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor: - residual = x - all_indices = [] - n_q = n_q or len(self.layers) - for layer in self.layers[:n_q]: - indices = layer.encode(residual) - quantized = layer.decode(indices) - residual = residual - quantized - all_indices.append(indices) - out_indices = torch.stack(all_indices) - return out_indices - - def decode(self, q_indices: torch.Tensor) -> torch.Tensor: - quantized_out = torch.tensor(0.0, device=q_indices.device) - for i, indices in enumerate(q_indices): - layer = self.layers[i] - quantized = layer.decode(indices) - quantized_out = quantized_out + quantized - return quantized_out diff --git a/spaces/TH5314/newbing/src/components/welcome-screen.tsx b/spaces/TH5314/newbing/src/components/welcome-screen.tsx deleted file mode 100644 index f7449fcbb6c621875e235db98f2790bf7894fb0a..0000000000000000000000000000000000000000 --- a/spaces/TH5314/newbing/src/components/welcome-screen.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { useBing } from '@/lib/hooks/use-bing' - -const exampleMessages = [ - { - heading: '🧐 提出复杂问题', - message: `我可以为我挑剔的只吃橙色食物的孩子做什么饭?` - }, - { - heading: '🙌 获取更好的答案', - message: '销量最高的 3 种宠物吸尘器有哪些优点和缺点?' 
- }, - { - heading: '🎨 获得创意灵感', - message: `以海盗的口吻写一首关于外太空鳄鱼的俳句` - } -] - -export function WelcomeScreen({ setInput }: Pick, 'setInput'>) { - return ( -
- {exampleMessages.map(example => ( - - ))} -
- ) -} diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py deleted file mode 100644 index b206692a0a976d8336e3f5896eadf4765a33fb2c..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py +++ /dev/null @@ -1,141 +0,0 @@ -from typing import FrozenSet, Iterable, Optional, Tuple, Union - -from pip._vendor.packaging.specifiers import SpecifierSet -from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.packaging.version import LegacyVersion, Version - -from pip._internal.models.link import Link, links_equivalent -from pip._internal.req.req_install import InstallRequirement -from pip._internal.utils.hashes import Hashes - -CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]] -CandidateVersion = Union[LegacyVersion, Version] - - -def format_name(project: str, extras: FrozenSet[str]) -> str: - if not extras: - return project - canonical_extras = sorted(canonicalize_name(e) for e in extras) - return "{}[{}]".format(project, ",".join(canonical_extras)) - - -class Constraint: - def __init__( - self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link] - ) -> None: - self.specifier = specifier - self.hashes = hashes - self.links = links - - @classmethod - def empty(cls) -> "Constraint": - return Constraint(SpecifierSet(), Hashes(), frozenset()) - - @classmethod - def from_ireq(cls, ireq: InstallRequirement) -> "Constraint": - links = frozenset([ireq.link]) if ireq.link else frozenset() - return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links) - - def __bool__(self) -> bool: - return bool(self.specifier) or bool(self.hashes) or bool(self.links) - - def __and__(self, other: InstallRequirement) -> "Constraint": - if not isinstance(other, InstallRequirement): - return NotImplemented - specifier = self.specifier & other.specifier - hashes = self.hashes & other.hashes(trust_internet=False) - links = self.links - if other.link: - links = links.union([other.link]) - return Constraint(specifier, hashes, links) - - def is_satisfied_by(self, candidate: "Candidate") -> bool: - # Reject if there are any mismatched URL constraints on this package. - if self.links and not all(_match_link(link, candidate) for link in self.links): - return False - # We can safely always allow prereleases here since PackageFinder - # already implements the prerelease logic, and would have filtered out - # prerelease candidates if the user does not expect them. - return self.specifier.contains(candidate.version, prereleases=True) - - -class Requirement: - @property - def project_name(self) -> NormalizedName: - """The "project name" of a requirement. - - This is different from ``name`` if this requirement contains extras, - in which case ``name`` would contain the ``[...]`` part, while this - refers to the name of the project. - """ - raise NotImplementedError("Subclass should override") - - @property - def name(self) -> str: - """The name identifying this requirement in the resolver. - - This is different from ``project_name`` if this requirement contains - extras, where ``project_name`` would not contain the ``[...]`` part. 
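A concrete input/output pair makes the `project_name` / `name` distinction above easier to follow: `format_name` (defined earlier in this hunk) appends the sorted, canonicalized extras to the project name, and only `name` carries that suffix. The snippet below is a hedged, standalone sketch; it leans on the public `packaging` package rather than pip's vendored copy, an assumption made purely so the example runs outside pip.

```python
# Hedged sketch of the extras-qualified naming rule from format_name above.
# Assumption: the public `packaging` package is available (pip itself uses
# its vendored copy under pip._vendor.packaging).
from packaging.utils import canonicalize_name


def format_name(project: str, extras: frozenset) -> str:
    if not extras:
        return project
    canonical_extras = sorted(canonicalize_name(e) for e in extras)
    return "{}[{}]".format(project, ",".join(canonical_extras))


print(format_name("pip", frozenset()))                         # -> pip
print(format_name("requests", frozenset({"SOCKS", "security"})))  # -> requests[security,socks]
```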
- """ - raise NotImplementedError("Subclass should override") - - def is_satisfied_by(self, candidate: "Candidate") -> bool: - return False - - def get_candidate_lookup(self) -> CandidateLookup: - raise NotImplementedError("Subclass should override") - - def format_for_error(self) -> str: - raise NotImplementedError("Subclass should override") - - -def _match_link(link: Link, candidate: "Candidate") -> bool: - if candidate.source_link: - return links_equivalent(link, candidate.source_link) - return False - - -class Candidate: - @property - def project_name(self) -> NormalizedName: - """The "project name" of the candidate. - - This is different from ``name`` if this candidate contains extras, - in which case ``name`` would contain the ``[...]`` part, while this - refers to the name of the project. - """ - raise NotImplementedError("Override in subclass") - - @property - def name(self) -> str: - """The name identifying this candidate in the resolver. - - This is different from ``project_name`` if this candidate contains - extras, where ``project_name`` would not contain the ``[...]`` part. - """ - raise NotImplementedError("Override in subclass") - - @property - def version(self) -> CandidateVersion: - raise NotImplementedError("Override in subclass") - - @property - def is_installed(self) -> bool: - raise NotImplementedError("Override in subclass") - - @property - def is_editable(self) -> bool: - raise NotImplementedError("Override in subclass") - - @property - def source_link(self) -> Optional[Link]: - raise NotImplementedError("Override in subclass") - - def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]: - raise NotImplementedError("Override in subclass") - - def get_install_requirement(self) -> Optional[InstallRequirement]: - raise NotImplementedError("Override in subclass") - - def format_for_error(self) -> str: - raise NotImplementedError("Subclass should override") diff --git a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/torch_impl/torch_attention_pseudo3d.py b/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/torch_impl/torch_attention_pseudo3d.py deleted file mode 100644 index 9f6fd30c932b21e58fba730c3a6d7604f4631a97..0000000000000000000000000000000000000000 --- a/spaces/TempoFunk/makeavid-sd-jax/makeavid_sd/torch_impl/torch_attention_pseudo3d.py +++ /dev/null @@ -1,294 +0,0 @@ -from typing import Optional - -import torch -import torch.nn.functional as F -from torch import nn - -from einops import rearrange - -from diffusers.models.attention_processor import Attention as CrossAttention -#from torch_cross_attention import CrossAttention - - -class TransformerPseudo3DModelOutput: - def __init__(self, sample: torch.FloatTensor) -> None: - self.sample = sample - - -class TransformerPseudo3DModel(nn.Module): - def __init__(self, - num_attention_heads: int = 16, - attention_head_dim: int = 88, - in_channels: Optional[int] = None, - num_layers: int = 1, - dropout: float = 0.0, - norm_num_groups: int = 32, - cross_attention_dim: Optional[int] = None, - attention_bias: bool = False - ) -> None: - super().__init__() - self.num_attention_heads = num_attention_heads - self.attention_head_dim = attention_head_dim - inner_dim = num_attention_heads * attention_head_dim - - # 1. 
Transformer2DModel can process both standard continous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` - # Define whether input is continuous or discrete depending on configuration - # its continuous - - # 2. Define input layers - self.in_channels = in_channels - - self.norm = torch.nn.GroupNorm( - num_groups = norm_num_groups, - num_channels = in_channels, - eps = 1e-6, - affine = True - ) - self.proj_in = nn.Conv2d( - in_channels, - inner_dim, - kernel_size = 1, - stride = 1, - padding = 0 - ) - - # 3. Define transformers blocks - self.transformer_blocks = nn.ModuleList( - [ - BasicTransformerBlock( - inner_dim, - num_attention_heads, - attention_head_dim, - dropout = dropout, - cross_attention_dim = cross_attention_dim, - attention_bias = attention_bias, - ) - for _ in range(num_layers) - ] - ) - - # 4. Define output layers - self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size = 1, stride = 1, padding = 0) - - def forward(self, - hidden_states: torch.Tensor, - encoder_hidden_states: Optional[torch.Tensor] = None, - timestep: torch.long = None - ) -> TransformerPseudo3DModelOutput: - """ - Args: - hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. - When continous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input - hidden_states - encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, context dim)`, *optional*): - Conditional embeddings for cross attention layer. If not given, cross-attention defaults to - self-attention. - timestep ( `torch.long`, *optional*): - Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. - - Returns: - [`~models.attention.Transformer2DModelOutput`] or `tuple`: [`~models.attention.Transformer2DModelOutput`] - if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample - tensor. - """ - b, c, *_, h, w = hidden_states.shape - is_video = hidden_states.ndim == 5 - f = None - if is_video: - b, c, f, h, w = hidden_states.shape - hidden_states = rearrange(hidden_states, 'b c f h w -> (b f) c h w') - #encoder_hidden_states = encoder_hidden_states.repeat_interleave(f, 0) - - # 1. Input - batch, channel, height, weight = hidden_states.shape - residual = hidden_states - hidden_states = self.norm(hidden_states) - hidden_states = self.proj_in(hidden_states) - inner_dim = hidden_states.shape[1] - hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim) - - # 2. Blocks - for block in self.transformer_blocks: - hidden_states = block( - hidden_states, - context = encoder_hidden_states, - timestep = timestep, - frames_length = f, - height = height, - weight = weight - ) - - # 3. Output - hidden_states = hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2) - hidden_states = self.proj_out(hidden_states) - output = hidden_states + residual - - if is_video: - output = rearrange(output, '(b f) c h w -> b c f h w', b = b) - - return TransformerPseudo3DModelOutput(sample = output) - - - -class BasicTransformerBlock(nn.Module): - r""" - A basic Transformer block. - - Parameters: - dim (`int`): The number of channels in the input and output. 
- num_attention_heads (`int`): The number of heads to use for multi-head attention. - attention_head_dim (`int`): The number of channels in each head. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. - cross_attention_dim (`int`, *optional*): The size of the context vector for cross attention. - num_embeds_ada_norm (: - obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. - attention_bias (: - obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. - """ - - def __init__(self, - dim: int, - num_attention_heads: int, - attention_head_dim: int, - dropout: float = 0.0, - cross_attention_dim: Optional[int] = None, - attention_bias: bool = False, - ) -> None: - super().__init__() - self.attn1 = CrossAttention( - query_dim = dim, - heads = num_attention_heads, - dim_head = attention_head_dim, - dropout = dropout, - bias = attention_bias - ) # is a self-attention - self.ff = FeedForward(dim, dropout = dropout) - self.attn2 = CrossAttention( - query_dim = dim, - cross_attention_dim = cross_attention_dim, - heads = num_attention_heads, - dim_head = attention_head_dim, - dropout = dropout, - bias = attention_bias - ) # is self-attn if context is none - self.attn_temporal = CrossAttention( - query_dim = dim, - heads = num_attention_heads, - dim_head = attention_head_dim, - dropout = dropout, - bias = attention_bias - ) # is a self-attention - - # layer norms - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm_temporal = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - - def forward(self, - hidden_states: torch.Tensor, - context: Optional[torch.Tensor] = None, - timestep: torch.int64 = None, - frames_length: Optional[int] = None, - height: Optional[int] = None, - weight: Optional[int] = None - ) -> torch.Tensor: - if context is not None and frames_length is not None: - context = context.repeat_interleave(frames_length, 0) - # 1. Self-Attention - norm_hidden_states = ( - self.norm1(hidden_states) - ) - hidden_states = self.attn1(norm_hidden_states) + hidden_states - - # 2. Cross-Attention - norm_hidden_states = ( - self.norm2(hidden_states) - ) - hidden_states = self.attn2( - norm_hidden_states, - encoder_hidden_states = context - ) + hidden_states - - # append temporal attention - if frames_length is not None: - hidden_states = rearrange( - hidden_states, - '(b f) (h w) c -> (b h w) f c', - f = frames_length, - h = height, - w = weight - ) - norm_hidden_states = ( - self.norm_temporal(hidden_states) - ) - hidden_states = self.attn_temporal(norm_hidden_states) + hidden_states - hidden_states = rearrange( - hidden_states, - '(b h w) f c -> (b f) (h w) c', - f = frames_length, - h = height, - w = weight - ) - - # 3. Feed-forward - hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states - return hidden_states - - -class FeedForward(nn.Module): - r""" - A feed-forward layer. - - Parameters: - dim (`int`): The number of channels in the input. - dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. - mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. 
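The temporal attention in `BasicTransformerBlock.forward` above works by reshaping rather than by a new attention mechanism: spatial and cross attention see tokens laid out as `(b f) (h w) c`, and the temporal step regroups the same tensor to `(b h w) f c` so every spatial location attends across frames before the layout is restored. A minimal shape check of that round trip, written as an editorial sketch rather than code from this file:

```python
# Editorial sketch: the spatial <-> temporal regrouping used by the block above.
import torch
from einops import rearrange

b, f, h, w, c = 2, 8, 4, 4, 16            # batch, frames, height, width, channels
x = torch.randn(b * f, h * w, c)          # layout seen by spatial/cross attention

# make time the sequence axis: every spatial position becomes its own batch item
x_t = rearrange(x, "(b f) (h w) c -> (b h w) f c", b=b, f=f, h=h, w=w)
assert x_t.shape == (b * h * w, f, c)

# temporal self-attention would run here; afterwards the spatial layout is restored
x_back = rearrange(x_t, "(b h w) f c -> (b f) (h w) c", b=b, h=h, w=w, f=f)
assert torch.equal(x, x_back)
```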
- """ - - def __init__(self, - dim: int, - dim_out: Optional[int] = None, - mult: int = 4, - dropout: float = 0.0 - ) -> None: - super().__init__() - inner_dim = int(dim * mult) - dim_out = dim_out if dim_out is not None else dim - - geglu = GEGLU(dim, inner_dim) - - self.net = nn.ModuleList([]) - # project in - self.net.append(geglu) - # project dropout - self.net.append(nn.Dropout(dropout)) - # project out - self.net.append(nn.Linear(inner_dim, dim_out)) - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - for module in self.net: - hidden_states = module(hidden_states) - return hidden_states - - -# feedforward -class GEGLU(nn.Module): - r""" - A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202. - - Parameters: - dim_in (`int`): The number of channels in the input. - dim_out (`int`): The number of channels in the output. - """ - - def __init__(self, dim_in: int, dim_out: int) -> None: - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states, gate = self.proj(hidden_states).chunk(2, dim = -1) - return hidden_states * F.gelu(gate) diff --git a/spaces/TeraTTS/TTS/app.py b/spaces/TeraTTS/TTS/app.py deleted file mode 100644 index 958ac40cfee33adc55ca0c45afd26ca2c893643b..0000000000000000000000000000000000000000 --- a/spaces/TeraTTS/TTS/app.py +++ /dev/null @@ -1,39 +0,0 @@ -import gradio as gr -from infer_onnx import TTS -from ruaccent import RUAccent # https://huggingface.co/TeraTTS/accentuator - -# Заголовок и ссылка на репозиторий с моделями -title = "GitHub with models: https://github.com/Tera2Space/RUTTS" - -# Список моделей TTS для выбора -models = ["TeraTTS/natasha-g2p-vits", "TeraTTS/glados2-g2p-vits", "TeraTTS/glados-g2p-vits", "TeraTTS/girl_nice-g2p-vits"] - -# Создаем словарь моделей и инициализируем их -models = {k: TTS(k) for k in models} - -# Создаем объект для акцентуации текста -accentizer = RUAccent(workdir="./model/ruaccent") -accentizer.load(omograph_model_size='big_poetry', use_dictionary=True) - -# Функция для предобработки текста (акцентуация и ё) - -# Функция для синтеза речи -def text_to_speech(model_name, length_scale, text, prep_text): - if prep_text: - text = accentizer.process_all(text) - audio = models[model_name](text, length_scale=length_scale) - models[model_name].save_wav(audio, 'temp.wav', sample_rate=models[model_name].config["samplerate"]) - - return 'temp.wav', f"Обработанный текст: '{text}'" - -# Создание интерфейса Gradio -model_choice = gr.Dropdown(choices=list(models.keys()), value="TeraTTS/natasha-g2p-vits", label="Выберите модель") -input_text = gr.Textbox(label="Введите текст для синтеза речи") -prep_text = gr.Checkbox(label="Предобработать", info="Хотите предобработать текст? 
(ударения, ё)", value=True) -length_scale = gr.Slider(minimum=0.1, maximum=2.0, label="Length scale (увеличить длину звучания) По умолчанию: 1.2", value=1.2) - -output_audio = gr.Audio(label="Аудио", type="numpy") -output_text = gr.Textbox(label="Обработанный текст") - -iface = gr.Interface(fn=text_to_speech, inputs=[model_choice, length_scale, input_text, prep_text], outputs=[output_audio, output_text], title=title) -iface.launch() \ No newline at end of file diff --git a/spaces/Tihsrah/Meetings/app.py b/spaces/Tihsrah/Meetings/app.py deleted file mode 100644 index 0e379941dc3161317b760a3f7593dc12f0d3f1c3..0000000000000000000000000000000000000000 --- a/spaces/Tihsrah/Meetings/app.py +++ /dev/null @@ -1,214 +0,0 @@ -import subprocess -# # Run the pip install command -subprocess.check_call(['pip', 'install', 'wordcloud']) -subprocess.check_call(['pip', 'install', 'git+https://github.com/openai/whisper.git']) -subprocess.check_call(['pip', 'install', 'transformers']) -subprocess.check_call(['pip', 'install', 'imageio==2.4.1']) -subprocess.check_call(['pip', 'install', 'moviepy']) -subprocess.check_call(['pip', 'install', 'keybert']) -subprocess.check_call(['pip', 'install', 'pytube']) - -import streamlit as st -import os -from wordcloud import WordCloud -from keybert import KeyBERT -import pandas as pd -import matplotlib.pyplot as plt -# ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - - -from moviepy.editor import * -from tqdm import tqdm -import os -import math -import nltk -nltk.download('punkt') -import whisper -from transformers import pipeline - -from pytube import YouTube -def process_video(path): - whisper_model = whisper.load_model("base") - - def SpeechToTextEng(aud_path): - result = whisper_model.transcribe(aud_path) - return result["text"] - - def run_range(duration): - time=duration/60 - floor=math.ceil(time) - return floor - - time_range=60 - clip_run_range=0 - clip_duration=0 - - def audio_generator(path,aud=0,vid=0): - if vid==1: - clip=VideoFileClip(path) - clip_duration = clip.duration - clip_run_range=run_range(clip_duration) - for i in range(clip_run_range): - left=i*time_range - right=left+time_range - # print(left,right) - - crop_clip=clip.subclip(left,right) - try: - crop_clip.audio.write_audiofile("vid_to_aud"+str(i)+".mp3") - except: - pass - - if aud==1: - audio_clip=AudioFileClip(path) - clip_duration = audio_clip.duration - print(clip_duration) - clip_run_range=run_range(clip_duration) - print(clip_run_range) - for i in range(clip_run_range): - left=i*time_range - right=left+time_range - # print(left,right) - crop_clip=audio_clip.subclip(left,right) - try: - crop_clip.write_audiofile("vid_to_aud"+str(i)+".mp3") - except: - pass - - - - - # YouTube video URL - video_url = path - - # Create a YouTube object - yt = YouTube(video_url) - - # Get the highest resolution video stream - stream = yt.streams.get_lowest_resolution() - - # Download the video - stream.download(filename='meeting.mp4') - - audio_generator("./meeting.mp4",vid=1) - transcribed_lit=[] - label_lit=[] - translated_lit=[] - - for i in tqdm(range(clip_run_range)): - transcribed=SpeechToTextEng("./vid_to_aud"+str(i)+".mp3") - transcribed_lit.append(transcribed) - os.remove("./vid_to_aud"+str(i)+".mp3") - - - data = pd.DataFrame( - {'transcriptions': transcribed_lit - }) - - summarizer = pipeline("summarization") - - sentiment_analyzer = pipeline("sentiment-analysis") - - sumarized_lit=[] - sentiment_lit=[] - for i in 
tqdm(range(len(data))): - summarized=summarizer(data.iloc[i,0],min_length=75, max_length=300)[0]['summary_text'] - sentiment = sentiment_analyzer(data.iloc[i,0])[0]['label'] - sumarized_lit.append(summarized) - sentiment_lit.append(sentiment) - - data['summary']=sumarized_lit - data['sentiment']=sentiment_lit - data.to_csv('output2.csv', index=False) - tot_text="" - for i in range(len(data)): - tot_text=tot_text+data.iloc[i,0] - - key_model = KeyBERT('distilbert-base-nli-mean-tokens') - def extract_keywords(text, top_n=50): - keywords = key_model.extract_keywords(text, top_n=top_n) - return [keyword[0] for keyword in keywords] - - tot_keywords=extract_keywords(tot_text) - - def get_500_words(text,left,right): - words = text.split() - first_500_words = ' '.join(words[left:right]) - return first_500_words - - def summarize_text(text): - chunk_size = 500 # Number of words per chunk - total_summary = "" # Total summary - - words = text.split() # Split the text into individual words - num_chunks = len(words) // chunk_size + 1 # Calculate the number of chunks - - for i in tqdm(range(num_chunks)): - start_index = i * chunk_size - end_index = start_index + chunk_size - chunk = " ".join(words[start_index:end_index]) - - # Pass the chunk to the summarizer (replace with your summarization code) - chunk_summary = summarizer(chunk,min_length=75, max_length=200)[0]['summary_text'] - # print(chunk_summary) - total_summary += chunk_summary - - return total_summary - - tot_summary=summarize_text(tot_text) - return tot_text,tot_summary,tot_keywords - - - - -# ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -def generate_word_cloud(text): - # Create a WordCloud object - wordcloud = WordCloud(width=800, height=400, background_color='white').generate(text) - - # Display the generated word cloud - fig, ax = plt.subplots(figsize=(10, 5)) - - # Plot the word cloud on the axis - ax.imshow(wordcloud, interpolation='bilinear') - ax.axis('off') - st.pyplot(fig) - - -def main(): - st.title("Meeting Summary Web App") - - # YouTube link input - youtube_url = st.text_input("Enter the YouTube video link") - - if st.button("Process Video"): - if youtube_url: - # Process the YouTube video - tot_text, tot_summary, tot_keywords = process_video(youtube_url) - - # Display the output - if os.path.exists("output2.csv"): - output_df = pd.read_csv("output2.csv") - st.subheader("Transcriptions:") - st.write(output_df["transcriptions"]) - - st.subheader("Labels:") - st.write(output_df["labels"]) - - st.subheader("Word Cloud:") - generate_word_cloud(output_df["transcriptions"].str.cat(sep=' ')) - - st.subheader("tot_text:") - st.write(tot_text) - - st.subheader("tot_summary:") - st.write(tot_summary) - - st.subheader("tot_keywords:") - st.write(tot_keywords) - - else: - st.write("No output file found.") - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/Tuana/find-the-animal/README.md b/spaces/Tuana/find-the-animal/README.md deleted file mode 100644 index 09b19eaa56943906c9d52761cc8b99e5e7784160..0000000000000000000000000000000000000000 --- a/spaces/Tuana/find-the-animal/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: MultiModalRetrieval for Image Search -emoji: 🦒 -colorFrom: green -colorTo: yellow -sdk: streamlit -sdk_version: 1.10.0 -app_file: 🏡_Home.py -pinned: false ---- \ No newline at end of file diff --git a/spaces/VideoCrafter/VideoCrafter/lvdm/distributions.py 
b/spaces/VideoCrafter/VideoCrafter/lvdm/distributions.py deleted file mode 100644 index 0b69b6984880ec24279b658384ed8031335e3474..0000000000000000000000000000000000000000 --- a/spaces/VideoCrafter/VideoCrafter/lvdm/distributions.py +++ /dev/null @@ -1,95 +0,0 @@ -import torch -import numpy as np - - -class AbstractDistribution: - def sample(self): - raise NotImplementedError() - - def mode(self): - raise NotImplementedError() - - -class DiracDistribution(AbstractDistribution): - def __init__(self, value): - self.value = value - - def sample(self): - return self.value - - def mode(self): - return self.value - - -class DiagonalGaussianDistribution(object): - def __init__(self, parameters, deterministic=False): - self.parameters = parameters - self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) - self.logvar = torch.clamp(self.logvar, -30.0, 20.0) - self.deterministic = deterministic - self.std = torch.exp(0.5 * self.logvar) - self.var = torch.exp(self.logvar) - if self.deterministic: - self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) - - def sample(self, noise=None): - if noise is None: - noise = torch.randn(self.mean.shape) - - x = self.mean + self.std * noise.to(device=self.parameters.device) - return x - - def kl(self, other=None): - if self.deterministic: - return torch.Tensor([0.]) - else: - if other is None: - return 0.5 * torch.sum(torch.pow(self.mean, 2) - + self.var - 1.0 - self.logvar, - dim=[1, 2, 3]) - else: - return 0.5 * torch.sum( - torch.pow(self.mean - other.mean, 2) / other.var - + self.var / other.var - 1.0 - self.logvar + other.logvar, - dim=[1, 2, 3]) - - def nll(self, sample, dims=[1,2,3]): - if self.deterministic: - return torch.Tensor([0.]) - logtwopi = np.log(2.0 * np.pi) - return 0.5 * torch.sum( - logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, - dim=dims) - - def mode(self): - return self.mean - - -def normal_kl(mean1, logvar1, mean2, logvar2): - """ - source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 - Compute the KL divergence between two gaussians. - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. - """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, torch.Tensor): - tensor = obj - break - assert tensor is not None, "at least one argument must be a Tensor" - - # Force variances to be Tensors. Broadcasting helps convert scalars to - # Tensors, but it does not work for torch.exp(). 
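For the `other is None` branch, `DiagonalGaussianDistribution.kl` above reduces to the textbook formula `0.5 * sum(mu^2 + var - 1 - logvar)`, i.e. the KL divergence from `N(mu, var)` to a standard normal. A quick numerical cross-check against `torch.distributions`, offered as a hedged sketch rather than code from this file:

```python
# Sketch: verify the closed-form diagonal-Gaussian KL above against torch.distributions.
import torch
from torch.distributions import Normal, kl_divergence

mean = torch.randn(2, 4, 8, 8)
logvar = torch.randn(2, 4, 8, 8).clamp(-30.0, 20.0)
std = torch.exp(0.5 * logvar)

# closed form used by DiagonalGaussianDistribution.kl() when other is None
kl_closed = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])

# reference: KL( N(mean, std) || N(0, 1) ), summed over the non-batch dimensions
kl_ref = kl_divergence(Normal(mean, std), Normal(0.0, 1.0)).sum(dim=[1, 2, 3])

assert torch.allclose(kl_closed, kl_ref, atol=1e-4)
```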
- logvar1, logvar2 = [ - x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) - for x in (logvar1, logvar2) - ] - - return 0.5 * ( - -1.0 - + logvar2 - - logvar1 - + torch.exp(logvar1 - logvar2) - + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) - ) diff --git a/spaces/WAT-ai-AA/stable-diffused-adversarial-attacks/model.py b/spaces/WAT-ai-AA/stable-diffused-adversarial-attacks/model.py deleted file mode 100644 index cc8c50c4265a547ef9991d3899937cbffeb2112a..0000000000000000000000000000000000000000 --- a/spaces/WAT-ai-AA/stable-diffused-adversarial-attacks/model.py +++ /dev/null @@ -1,78 +0,0 @@ -import torch.nn as nn - - -def conv3x3(in_channels, out_channels, stride=1): - return nn.Conv2d( - in_channels, - out_channels, - kernel_size=3, - stride=stride, - padding=1, - bias=False, - ) - - -class ResidualBlock(nn.Module): - def __init__(self, in_channels, out_channels, stride=1, downsample=None): - super(ResidualBlock, self).__init__() - self.conv1 = conv3x3(in_channels, out_channels, stride) - self.bn1 = nn.BatchNorm2d(out_channels) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(out_channels, out_channels) - self.bn2 = nn.BatchNorm2d(out_channels) - self.downsample = downsample - - def forward(self, x): - residual = x - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - out = self.conv2(out) - out = self.bn2(out) - if self.downsample: - residual = self.downsample(x) - out += residual - out = self.relu(out) - return out - - -class ResNet(nn.Module): - def __init__(self, block, layers, num_classes=10): - super(ResNet, self).__init__() - self.in_channels = 16 - self.conv = conv3x3(3, 16) - self.bn = nn.BatchNorm2d(16) - self.relu = nn.ReLU(inplace=True) - self.layer1 = self.make_layer(block, 16, layers[0]) - self.layer2 = self.make_layer(block, 32, layers[1], 2) - self.layer3 = self.make_layer(block, 64, layers[2], 2) - self.avg_pool = nn.AvgPool2d(8) - self.fc = nn.Linear(64, num_classes) - - def make_layer(self, block, out_channels, blocks, stride=1): - downsample = None - if (stride != 1) or (self.in_channels != out_channels): - downsample = nn.Sequential( - conv3x3(self.in_channels, out_channels, stride=stride), - nn.BatchNorm2d(out_channels), - ) - layers = [] - layers.append( - block(self.in_channels, out_channels, stride, downsample) - ) - self.in_channels = out_channels - for i in range(1, blocks): - layers.append(block(out_channels, out_channels)) - return nn.Sequential(*layers) - - def forward(self, x): - out = self.conv(x) - out = self.bn(out) - out = self.relu(out) - out = self.layer1(out) - out = self.layer2(out) - out = self.layer3(out) - out = self.avg_pool(out) - out = out.view(out.size(0), -1) - out = self.fc(out) - return out diff --git a/spaces/Wrathless/Dkrotzer-MusicalMagic/README.md b/spaces/Wrathless/Dkrotzer-MusicalMagic/README.md deleted file mode 100644 index 5ed3b1f58120772f839d8a172a943bfd63818fd4..0000000000000000000000000000000000000000 --- a/spaces/Wrathless/Dkrotzer-MusicalMagic/README.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: MusicGen -python_version: '3.9' -tags: -- music generation -- language models -- LLMs -app_file: app.py -emoji: 🎵 -colorFrom: white -colorTo: blue -sdk: gradio -sdk_version: 3.34.0 -pinned: true -suggested_hardware: a10g-large -license: cc-by-nc-4.0 -duplicated_from: musicgen/MusicGen ---- -# Audiocraft -![docs badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_docs/badge.svg) -![linter 
badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_linter/badge.svg) -![tests badge](https://github.com/facebookresearch/audiocraft/workflows/audiocraft_tests/badge.svg) - -Audiocraft is a PyTorch library for deep learning research on audio generation. At the moment, it contains the code for MusicGen, a state-of-the-art controllable text-to-music model. - -## MusicGen - -Audiocraft provides the code and models for MusicGen, [a simple and controllable model for music generation][arxiv]. MusicGen is a single stage auto-regressive -Transformer model trained over a 32kHz EnCodec tokenizer with 4 codebooks sampled at 50 Hz. Unlike existing methods like [MusicLM](https://arxiv.org/abs/2301.11325), MusicGen doesn't not require a self-supervised semantic representation, and it generates -all 4 codebooks in one pass. By introducing a small delay between the codebooks, we show we can predict -them in parallel, thus having only 50 auto-regressive steps per second of audio. -Check out our [sample page][musicgen_samples] or test the available demo! - - - Open In Colab - - - Open in HugginFace - -
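The codebook-delay idea described above is easiest to see on a toy grid. The sketch below is an editorial illustration rather than code from this repository: it shifts codebook k right by k steps, so decoding step t emits codebook k's token for frame t - k, which is why one second of 50 Hz audio needs roughly 50 sequential steps (plus K - 1 warm-up steps) instead of 200.

```python
# Illustrative sketch (not from this repo): a delay pattern for K codebooks.
# K = 4 codebooks at a 50 Hz frame rate are the figures quoted above.
import numpy as np


def build_delay_pattern(tokens: np.ndarray, pad: int = -1) -> np.ndarray:
    """tokens: [K, T] codebook indices -> delayed grid of shape [K, T + K - 1].
    Codebook k is shifted right by k steps, so at step t the model predicts
    codebook k's token for frame t - k."""
    K, T = tokens.shape
    out = np.full((K, T + K - 1), pad, dtype=tokens.dtype)
    for k in range(K):
        out[k, k:k + T] = tokens[k]
    return out


K, frame_rate = 4, 50
tokens = np.arange(K * frame_rate).reshape(K, frame_rate)  # one second of audio
delayed = build_delay_pattern(tokens)
print(tokens.shape, "->", delayed.shape)  # (4, 50) -> (4, 53)
```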
- -## Installation -Audiocraft requires Python 3.9, PyTorch 2.0.0, and a GPU with at least 16 GB of memory (for the medium-sized model). To install Audiocraft, you can run the following: - -```shell -# Best to make sure you have torch installed first, in particular before installing xformers. -# Don't run this if you already have PyTorch installed. -pip install 'torch>=2.0' -# Then proceed to one of the following -pip install -U audiocraft # stable release -pip install -U git+https://git@github.com/facebookresearch/audiocraft#egg=audiocraft # bleeding edge -pip install -e . # or if you cloned the repo locally -``` - -## Usage -You can play with MusicGen by running the jupyter notebook at [`demo.ipynb`](./demo.ipynb) locally, or use the provided [colab notebook](https://colab.research.google.com/drive/1fxGqfg96RBUvGxZ1XXN07s3DthrKUl4-?usp=sharing). Finally, a demo is also available on the [`facebook/MusiGen` HugginFace Space](https://huggingface.co/spaces/facebook/MusicGen) (huge thanks to all the HF team for their support). - -## API - -We provide a simple API and 4 pre-trained models. The pre trained models are: -- `small`: 300M model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-small) -- `medium`: 1.5B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-medium) -- `melody`: 1.5B model, text to music and text+melody to music - [🤗 Hub](https://huggingface.co/facebook/musicgen-melody) -- `large`: 3.3B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-large) - -We observe the best trade-off between quality and compute with the `medium` or `melody` model. -In order to use MusicGen locally **you must have a GPU**. We recommend 16GB of memory, but smaller -GPUs will be able to generate short sequences, or longer sequences with the `small` model. - -**Note**: Please make sure to have [ffmpeg](https://ffmpeg.org/download.html) installed when using newer version of `torchaudio`. -You can install it with: -``` -apt get install ffmpeg -``` - -See after a quick example for using the API. - -```python -import torchaudio -from audiocraft.models import MusicGen -from audiocraft.data.audio import audio_write - -model = MusicGen.get_pretrained('melody') -model.set_generation_params(duration=8) # generate 8 seconds. -wav = model.generate_unconditional(4) # generates 4 unconditional audio samples -descriptions = ['happy rock', 'energetic EDM', 'sad jazz'] -wav = model.generate(descriptions) # generates 3 samples. - -melody, sr = torchaudio.load('./assets/bach.mp3') -# generates using the melody from the given audio and the provided descriptions. -wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr) - -for idx, one_wav in enumerate(wav): - # Will save under {idx}.wav, with loudness normalization at -14 db LUFS. - audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness") -``` - - -## Model Card - -See [the model card page](./MODEL_CARD.md). - -## FAQ - -#### Will the training code be released? - -Yes. We will soon release the training code for MusicGen and EnCodec. - - -## Citation -``` -@article{copet2023simple, - title={Simple and Controllable Music Generation}, - author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez}, - year={2023}, - journal={arXiv preprint arXiv:2306.05284}, -} -``` - -## License -* The code in this repository is released under the MIT license as found in the [LICENSE file](LICENSE). 
-* The weights in this repository are released under the CC-BY-NC 4.0 license as found in the [LICENSE_weights file](LICENSE_weights). - -[arxiv]: https://arxiv.org/abs/2306.05284 -[musicgen_samples]: https://ai.honu.io/papers/musicgen/ diff --git a/spaces/Xenova/semantic-image-search-client/_next/static/chunks/482-32e037cb6270169e.js b/spaces/Xenova/semantic-image-search-client/_next/static/chunks/482-32e037cb6270169e.js deleted file mode 100644 index 8a9f1658452089aaf3d52e7b22327fa51344fe15..0000000000000000000000000000000000000000 --- a/spaces/Xenova/semantic-image-search-client/_next/static/chunks/482-32e037cb6270169e.js +++ /dev/null @@ -1,9 +0,0 @@ -(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[482],{5051:function(e,t,r){"use strict";r.d(t,{Jx:function(){return p}});var n=["0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","#","$","%","*","+",",","-",".",":",";","=","?","@","[","]","^","_","{","|","}","~"],i=e=>{let t=0;for(let r=0;r{let t=e/255;return t<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)},a=e=>{let t=Math.max(0,Math.min(1,e));return t<=.0031308?Math.trunc(3294.6*t+.5):Math.trunc((1.055*Math.pow(t,.4166666666666667)-.055)*255+.5)},l=e=>e<0?-1:1,u=(e,t)=>l(e)*Math.pow(Math.abs(e),t),s=class extends Error{constructor(e){super(e),this.name="ValidationError",this.message=e}},d=e=>{if(!e||e.length<6)throw new s("The blurhash string must be at least 6 characters");let t=i(e[0]),r=Math.floor(t/9)+1,n=t%9+1;if(e.length!==4+2*n*r)throw new s(`blurhash length mismatch: length is ${e.length} but it should be ${4+2*n*r}`)},f=e=>[o(e>>16),o(e>>8&255),o(255&e)],c=(e,t)=>[u((Math.floor(e/361)-9)/9,2)*t,u((Math.floor(e/19)%19-9)/9,2)*t,u((e%19-9)/9,2)*t],p=(e,t,r,n)=>{d(e),n|=1;let o=i(e[0]),l=Math.floor(o/9)+1,u=o%9+1,s=(i(e[1])+1)/166,p=Array(u*l);for(let t=0;t{}).then(()=>{if(e.parentElement&&e.isConnected){if("blur"===t&&i(!0),null==r?void 0:r.current){let t=new Event("load");Object.defineProperty(t,"target",{writable:!1,value:e});let n=!1,i=!1;r.current({...t,nativeEvent:t,currentTarget:e,target:e,isDefaultPrevented:()=>n,isPropagationStopped:()=>i,persist:()=>{},preventDefault:()=>{n=!0,t.preventDefault()},stopPropagation:()=>{i=!0,t.stopPropagation()}})}(null==n?void 0:n.current)&&n.current(e)}})}function m(e){let[t,r]=o.version.split("."),n=parseInt(t,10),i=parseInt(r,10);return n>18||18===n&&i>=3?{fetchPriority:e}:{fetchpriority:e}}let h=(0,o.forwardRef)((e,t)=>{let{src:r,srcSet:n,sizes:i,height:a,width:l,decoding:u,className:s,style:d,fetchPriority:f,placeholder:c,loading:p,unoptimized:h,fill:y,onLoadRef:b,onLoadingCompleteRef:v,setBlurComplete:_,setShowAltText:w,onLoad:S,onError:P,...O}=e;return o.default.createElement("img",{...O,...m(f),loading:p,width:l,height:a,decoding:u,"data-nimg":y?"fill":"1",className:s,style:d,sizes:i,srcSet:n,src:r,ref:(0,o.useCallback)(e=>{t&&("function"==typeof t?t(e):"object"==typeof t&&(t.current=e)),e&&(P&&(e.src=e.src),e.complete&&g(e,c,b,v,_,h))},[r,c,b,v,_,P,h,t]),onLoad:e=>{let t=e.currentTarget;g(t,c,b,v,_,h)},onError:e=>{w(!0),"blur"===c&&_(!0),P&&P(e)}})});function y(e){let{isAppRouter:t,imgAttributes:r}=e,n={as:"image",imageSrcSet:r.srcSet,imageSizes:r.sizes,crossOrigin:r.crossOrigin,referrerPolicy:r.referrerPolicy,...m(r.fetchPriority)};return 
t?((0,a.preload)(r.src,n),null):o.default.createElement(l.default,null,o.default.createElement("link",{key:"__nimg-"+r.src+r.srcSet+r.sizes,rel:"preload",href:r.srcSet?void 0:r.src,...n}))}let b=(0,o.forwardRef)((e,t)=>{let r=(0,o.useContext)(f.RouterContext),n=(0,o.useContext)(d.ImageConfigContext),i=(0,o.useMemo)(()=>{let e=p||n||s.imageConfigDefault,t=[...e.deviceSizes,...e.imageSizes].sort((e,t)=>e-t),r=e.deviceSizes.sort((e,t)=>e-t);return{...e,allSizes:t,deviceSizes:r}},[n]),{onLoad:a,onLoadingComplete:l}=e,g=(0,o.useRef)(a);(0,o.useEffect)(()=>{g.current=a},[a]);let m=(0,o.useRef)(l);(0,o.useEffect)(()=>{m.current=l},[l]);let[b,v]=(0,o.useState)(!1),[_,w]=(0,o.useState)(!1),{props:S,meta:P}=(0,u.getImgProps)(e,{defaultLoader:c.default,imgConf:i,blurComplete:b,showAltText:_});return o.default.createElement(o.default.Fragment,null,o.default.createElement(h,{...S,unoptimized:P.unoptimized,placeholder:P.placeholder,fill:P.fill,onLoadRef:g,onLoadingCompleteRef:m,setBlurComplete:v,setShowAltText:w,ref:t}),P.priority?o.default.createElement(y,{isAppRouter:!r,imgAttributes:S}):null)});("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},7555:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"AmpStateContext",{enumerable:!0,get:function(){return o}});let n=r(1024),i=n._(r(2265)),o=i.default.createContext({})},8551:function(e,t){"use strict";function r(e){let{ampFirst:t=!1,hybrid:r=!1,hasQuery:n=!1}=void 0===e?{}:e;return t||r&&n}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"isInAmpMode",{enumerable:!0,get:function(){return r}})},2301:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getImgProps",{enumerable:!0,get:function(){return l}}),r(7873);let n=r(9540),i=r(7709);function o(e){return void 0!==e.default}function a(e){return void 0===e?e:"number"==typeof e?Number.isFinite(e)?e:NaN:"string"==typeof e&&/^[0-9]+$/.test(e)?parseInt(e,10):NaN}function l(e,t){var r;let l,u,s,{src:d,sizes:f,unoptimized:c=!1,priority:p=!1,loading:g,className:m,quality:h,width:y,height:b,fill:v=!1,style:_,onLoad:w,onLoadingComplete:S,placeholder:P="empty",blurDataURL:O,fetchPriority:C,layout:j,objectFit:x,objectPosition:E,lazyBoundary:M,lazyRoot:I,...z}=e,{imgConf:k,showAltText:A,blurComplete:R,defaultLoader:D}=t,U=k||i.imageConfigDefault;if("allSizes"in U)l=U;else{let e=[...U.deviceSizes,...U.imageSizes].sort((e,t)=>e-t),t=U.deviceSizes.sort((e,t)=>e-t);l={...U,allSizes:e,deviceSizes:t}}let L=z.loader||D;delete z.loader,delete z.srcSet;let N="__next_img_default"in L;if(N){if("custom"===l.loader)throw Error('Image with src "'+d+'" is missing "loader" prop.\nRead more: https://nextjs.org/docs/messages/next-image-missing-loader')}else{let e=L;L=t=>{let{config:r,...n}=t;return e(n)}}if(j){"fill"===j&&(v=!0);let e={intrinsic:{maxWidth:"100%",height:"auto"},responsive:{width:"100%",height:"auto"}}[j];e&&(_={..._,...e});let t={responsive:"100vw",fill:"100vw"}[j];t&&!f&&(f=t)}let T="",F=a(y),W=a(b);if("object"==typeof(r=d)&&(o(r)||void 0!==r.src)){let e=o(d)?d.default:d;if(!e.src)throw Error("An object should only be passed to the image component src parameter if it comes from a static image import. It must include src. 
Received "+JSON.stringify(e));if(!e.height||!e.width)throw Error("An object should only be passed to the image component src parameter if it comes from a static image import. It must include height and width. Received "+JSON.stringify(e));if(u=e.blurWidth,s=e.blurHeight,O=O||e.blurDataURL,T=e.src,!v){if(F||W){if(F&&!W){let t=F/e.width;W=Math.round(e.height*t)}else if(!F&&W){let t=W/e.height;F=Math.round(e.width*t)}}else F=e.width,W=e.height}}let B=!p&&("lazy"===g||void 0===g);(!(d="string"==typeof d?d:T)||d.startsWith("data:")||d.startsWith("blob:"))&&(c=!0,B=!1),l.unoptimized&&(c=!0),N&&d.endsWith(".svg")&&!l.dangerouslyAllowSVG&&(c=!0),p&&(C="high");let V=a(h),$=Object.assign(v?{position:"absolute",height:"100%",width:"100%",left:0,top:0,right:0,bottom:0,objectFit:x,objectPosition:E}:{},A?{}:{color:"transparent"},_),G="blur"===P&&O&&!R?{backgroundSize:$.objectFit||"cover",backgroundPosition:$.objectPosition||"50% 50%",backgroundRepeat:"no-repeat",backgroundImage:'url("data:image/svg+xml;charset=utf-8,'+(0,n.getImageBlurSvg)({widthInt:F,heightInt:W,blurWidth:u,blurHeight:s,blurDataURL:O,objectFit:$.objectFit})+'")'}:{},H=function(e){let{config:t,src:r,unoptimized:n,width:i,quality:o,sizes:a,loader:l}=e;if(n)return{src:r,srcSet:void 0,sizes:void 0};let{widths:u,kind:s}=function(e,t,r){let{deviceSizes:n,allSizes:i}=e;if(r){let e=/(^|\s)(1?\d?\d)vw/g,t=[];for(let n;n=e.exec(r);n)t.push(parseInt(n[2]));if(t.length){let e=.01*Math.min(...t);return{widths:i.filter(t=>t>=n[0]*e),kind:"w"}}return{widths:i,kind:"w"}}if("number"!=typeof t)return{widths:n,kind:"w"};let o=[...new Set([t,2*t].map(e=>i.find(t=>t>=e)||i[i.length-1]))];return{widths:o,kind:"x"}}(t,i,a),d=u.length-1;return{sizes:a||"w"!==s?a:"100vw",srcSet:u.map((e,n)=>l({config:t,src:r,quality:o,width:e})+" "+("w"===s?e:n+1)+s).join(", "),src:l({config:t,src:r,quality:o,width:u[d]})}}({config:l,src:d,unoptimized:c,width:F,quality:V,sizes:f,loader:L}),q={...z,loading:B?"lazy":g,fetchPriority:C,width:F,height:W,decoding:"async",className:m,style:{...$,...G},sizes:H.sizes,srcSet:H.srcSet,src:H.src},J={unoptimized:c,priority:p,placeholder:P,fill:v};return{props:q,meta:J}}},2912:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{defaultHead:function(){return d},default:function(){return g}});let n=r(1024),i=r(8533),o=i._(r(2265)),a=n._(r(2378)),l=r(7555),u=r(1330),s=r(8551);function d(e){void 0===e&&(e=!1);let t=[o.default.createElement("meta",{charSet:"utf-8"})];return e||t.push(o.default.createElement("meta",{name:"viewport",content:"width=device-width"})),t}function f(e,t){return"string"==typeof t||"number"==typeof t?e:t.type===o.default.Fragment?e.concat(o.default.Children.toArray(t.props.children).reduce((e,t)=>"string"==typeof t||"number"==typeof t?e:e.concat(t),[])):e.concat(t)}r(7873);let c=["name","httpEquiv","charSet","itemProp"];function p(e,t){let{inAmpMode:r}=t;return e.reduce(f,[]).reverse().concat(d(r).reverse()).filter(function(){let e=new Set,t=new Set,r=new Set,n={};return i=>{let o=!0,a=!1;if(i.key&&"number"!=typeof i.key&&i.key.indexOf("$")>0){a=!0;let t=i.key.slice(i.key.indexOf("$")+1);e.has(t)?o=!1:e.add(t)}switch(i.type){case"title":case"base":t.has(i.type)?o=!1:t.add(i.type);break;case"meta":for(let e=0,t=c.length;e{let n=e.key||t;if(!r&&"link"===e.type&&e.props.href&&["https://fonts.googleapis.com/css","https://use.typekit.net/"].some(t=>e.props.href.startsWith(t))){let t={...e.props||{}};return 
t["data-href"]=t.href,t.href=void 0,t["data-optimized-fonts"]=!0,o.default.cloneElement(e,t)}return o.default.cloneElement(e,{key:n})})}let g=function(e){let{children:t}=e,r=(0,o.useContext)(l.AmpStateContext),n=(0,o.useContext)(u.HeadManagerContext);return o.default.createElement(a.default,{reduceComponentsToState:p,headManager:n,inAmpMode:(0,s.isInAmpMode)(r)},t)};("function"==typeof t.default||"object"==typeof t.default&&null!==t.default)&&void 0===t.default.__esModule&&(Object.defineProperty(t.default,"__esModule",{value:!0}),Object.assign(t.default,t),e.exports=t.default)},9540:function(e,t){"use strict";function r(e){let{widthInt:t,heightInt:r,blurWidth:n,blurHeight:i,blurDataURL:o,objectFit:a}=e,l=n||t,u=i||r,s=o.startsWith("data:image/jpeg")?"%3CfeComponentTransfer%3E%3CfeFuncA type='discrete' tableValues='1 1'/%3E%3C/feComponentTransfer%3E%":"";return l&&u?"%3Csvg xmlns='http%3A//www.w3.org/2000/svg' viewBox='0 0 "+l+" "+u+"'%3E%3Cfilter id='b' color-interpolation-filters='sRGB'%3E%3CfeGaussianBlur stdDeviation='"+(n&&i?"1":"20")+"'/%3E"+s+"%3C/filter%3E%3Cimage preserveAspectRatio='none' filter='url(%23b)' x='0' y='0' height='100%25' width='100%25' href='"+o+"'/%3E%3C/svg%3E":"%3Csvg xmlns='http%3A//www.w3.org/2000/svg'%3E%3Cimage style='filter:blur(20px)' preserveAspectRatio='"+("contain"===a?"xMidYMid":"cover"===a?"xMidYMid slice":"none")+"' x='0' y='0' height='100%25' width='100%25' href='"+o+"'/%3E%3C/svg%3E"}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"getImageBlurSvg",{enumerable:!0,get:function(){return r}})},9469:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"ImageConfigContext",{enumerable:!0,get:function(){return a}});let n=r(1024),i=n._(r(2265)),o=r(7709),a=i.default.createContext(o.imageConfigDefault)},7709:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{VALID_LOADERS:function(){return r},imageConfigDefault:function(){return n}});let r=["default","imgix","cloudinary","akamai","custom"],n={deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",loaderFile:"",domains:[],disableStaticImages:!1,minimumCacheTTL:60,formats:["image/webp"],dangerouslyAllowSVG:!1,contentSecurityPolicy:"script-src 'none'; frame-src 'none'; sandbox;",contentDispositionType:"inline",remotePatterns:[],unoptimized:!1}},1295:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),function(e,t){for(var r in t)Object.defineProperty(e,r,{enumerable:!0,get:t[r]})}(t,{default:function(){return s},unstable_getImgProps:function(){return u}});let n=r(1024),i=r(2301),o=r(7873),a=r(3222),l=n._(r(5033)),u=e=>{(0,o.warnOnce)("Warning: unstable_getImgProps() is experimental and may change or be removed at any time. 
Use at your own risk.");let{props:t}=(0,i.getImgProps)(e,{defaultLoader:l.default,imgConf:{deviceSizes:[640,750,828,1080,1200,1920,2048,3840],imageSizes:[16,32,48,64,96,128,256,384],path:"/_next/image",loader:"default",dangerouslyAllowSVG:!1,unoptimized:!1}});for(let[e,r]of Object.entries(t))void 0===r&&delete t[e];return{props:t}},s=a.Image},5033:function(e,t){"use strict";function r(e){let{config:t,src:r,width:n,quality:i}=e;return t.path+"?url="+encodeURIComponent(r)+"&w="+n+"&q="+(i||75)}Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return n}}),r.__next_img_default=!0;let n=r},2706:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"RouterContext",{enumerable:!0,get:function(){return o}});let n=r(1024),i=n._(r(2265)),o=i.default.createContext(null)},2378:function(e,t,r){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return l}});let n=r(8533),i=n._(r(2265)),o=i.useLayoutEffect,a=i.useEffect;function l(e){let{headManager:t,reduceComponentsToState:r}=e;function n(){if(t&&t.mountedInstances){let n=i.Children.toArray(Array.from(t.mountedInstances).filter(Boolean));t.updateHead(r(n,e))}}return o(()=>{var r;return null==t||null==(r=t.mountedInstances)||r.add(e.children),()=>{var r;null==t||null==(r=t.mountedInstances)||r.delete(e.children)}}),o(()=>(t&&(t._pendingUpdate=n),()=>{t&&(t._pendingUpdate=n)})),a(()=>(t&&t._pendingUpdate&&(t._pendingUpdate(),t._pendingUpdate=null),()=>{t&&t._pendingUpdate&&(t._pendingUpdate(),t._pendingUpdate=null)})),null}},7873:function(e,t){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"warnOnce",{enumerable:!0,get:function(){return r}});let r=e=>{}},622:function(e,t,r){"use strict";/** - * @license React - * react-jsx-runtime.production.min.js - * - * Copyright (c) Meta Platforms, Inc. and affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */var n=r(2265),i=Symbol.for("react.element"),o=Symbol.for("react.fragment"),a=Object.prototype.hasOwnProperty,l=n.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED.ReactCurrentOwner,u={key:!0,ref:!0,__self:!0,__source:!0};function s(e,t,r){var n,o={},s=null,d=null;for(n in void 0!==r&&(s=""+r),void 0!==t.key&&(s=""+t.key),void 0!==t.ref&&(d=t.ref),t)a.call(t,n)&&!u.hasOwnProperty(n)&&(o[n]=t[n]);if(e&&e.defaultProps)for(n in t=e.defaultProps)void 0===o[n]&&(o[n]=t[n]);return{$$typeof:i,type:e,key:s,ref:d,props:o,_owner:l.current}}t.Fragment=o,t.jsx=s,t.jsxs=s},7437:function(e,t,r){"use strict";e.exports=r(622)},6691:function(e,t,r){e.exports=r(1295)}}]); \ No newline at end of file diff --git a/spaces/XzJosh/nine1-Bert-VITS2/server.py b/spaces/XzJosh/nine1-Bert-VITS2/server.py deleted file mode 100644 index c736ca4f95fec853950eef6654ef79856beffc0a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nine1-Bert-VITS2/server.py +++ /dev/null @@ -1,123 +0,0 @@ -from flask import Flask, request, Response -from io import BytesIO -import torch -from av import open as avopen - -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -from scipy.io import wavfile - -# Flask Init -app = Flask(__name__) -app.config['JSON_AS_ASCII'] = False -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - print([f"{p}{t}" for p, t in zip(phone, tone)]) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language - -def infer(text, sdp_ratio, noise_scale, noise_scale_w,length_scale,sid): - bert, phones, tones, lang_ids = get_text(text,"ZH", hps,) - with torch.no_grad(): - x_tst=phones.to(dev).unsqueeze(0) - tones=tones.to(dev).unsqueeze(0) - lang_ids=lang_ids.to(dev).unsqueeze(0) - bert = bert.to(dev).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev) - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids,bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - return audio - -def replace_punctuation(text, i=2): - punctuation = ",。?!" 
- for char in punctuation: - text = text.replace(char, char * i) - return text - -def wav2(i, o, format): - inp = avopen(i, 'rb') - out = avopen(o, 'wb', format=format) - if format == "ogg": format = "libvorbis" - - ostream = out.add_stream(format) - - for frame in inp.decode(audio=0): - for p in ostream.encode(frame): out.mux(p) - - for p in ostream.encode(None): out.mux(p) - - out.close() - inp.close() - -# Load Generator -hps = utils.get_hparams_from_file("./configs/config.json") - -dev='cuda' -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(dev) -_ = net_g.eval() - -_ = utils.load_checkpoint("logs/G_649000.pth", net_g, None,skip_optimizer=True) - -@app.route("/",methods=['GET','POST']) -def main(): - if request.method == 'GET': - try: - speaker = request.args.get('speaker') - text = request.args.get('text').replace("/n","") - sdp_ratio = float(request.args.get("sdp_ratio", 0.2)) - noise = float(request.args.get("noise", 0.5)) - noisew = float(request.args.get("noisew", 0.6)) - length = float(request.args.get("length", 1.2)) - if length >= 2: - return "Too big length" - if len(text) >=200: - return "Too long text" - fmt = request.args.get("format", "wav") - if None in (speaker, text): - return "Missing Parameter" - if fmt not in ("mp3", "wav", "ogg"): - return "Invalid Format" - except: - return "Invalid Parameter" - - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise, noise_scale_w=noisew, length_scale=length, sid=speaker) - - with BytesIO() as wav: - wavfile.write(wav, hps.data.sampling_rate, audio) - torch.cuda.empty_cache() - if fmt == "wav": - return Response(wav.getvalue(), mimetype="audio/wav") - wav.seek(0, 0) - with BytesIO() as ofp: - wav2(wav, ofp, fmt) - return Response( - ofp.getvalue(), - mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg" - ) diff --git a/spaces/YONG627/456123/yolov5-code-main/utils/aws/mime.sh b/spaces/YONG627/456123/yolov5-code-main/utils/aws/mime.sh deleted file mode 100644 index c319a83cfbdf09bea634c3bd9fca737c0b1dd505..0000000000000000000000000000000000000000 --- a/spaces/YONG627/456123/yolov5-code-main/utils/aws/mime.sh +++ /dev/null @@ -1,26 +0,0 @@ -# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ -# This script will run on every instance restart, not only on first start -# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- - -Content-Type: multipart/mixed; boundary="//" -MIME-Version: 1.0 - ---// -Content-Type: text/cloud-config; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="cloud-config.txt" - -#cloud-config -cloud_final_modules: -- [scripts-user, always] - ---// -Content-Type: text/x-shellscript; charset="us-ascii" -MIME-Version: 1.0 -Content-Transfer-Encoding: 7bit -Content-Disposition: attachment; filename="userdata.txt" - -#!/bin/bash -# --- paste contents of userdata.sh here --- ---// diff --git a/spaces/YUANAI/DiffspeechResearch/modules/tts/portaspeech/portaspeech_flow.py b/spaces/YUANAI/DiffspeechResearch/modules/tts/portaspeech/portaspeech_flow.py deleted file mode 100644 index 256887dd8b365e38ac6c1973f4ec376e93029652..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/modules/tts/portaspeech/portaspeech_flow.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch -import torch.distributions as dist -from 
torch import nn -from modules.commons.normalizing_flow.glow_modules import Glow -from modules.tts.portaspeech.portaspeech import PortaSpeech - - -class PortaSpeechFlow(PortaSpeech): - def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None): - super().__init__(ph_dict_size, word_dict_size, hparams, out_dims) - cond_hs = 80 - if hparams.get('use_txt_cond', True): - cond_hs = cond_hs + hparams['hidden_size'] - if hparams.get('use_latent_cond', False): - cond_hs = cond_hs + hparams['latent_size'] - if hparams['use_cond_proj']: - self.g_proj = nn.Conv1d(cond_hs, 160, 5, padding=2) - cond_hs = 160 - self.post_flow = Glow( - 80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1, - hparams['post_glow_n_blocks'], hparams['post_glow_n_block_layers'], - n_split=4, n_sqz=2, - gin_channels=cond_hs, - share_cond_layers=hparams['post_share_cond_layers'], - share_wn_layers=hparams['share_wn_layers'], - sigmoid_scale=hparams['sigmoid_scale'] - ) - self.prior_dist = dist.Normal(0, 1) - - def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, - spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, - forward_post_glow=True, two_stage=True, global_step=None): - is_training = self.training - train_fvae = not (forward_post_glow and two_stage) - if not train_fvae: - self.eval() - with torch.set_grad_enabled(mode=train_fvae): - ret = super(PortaSpeechFlow, self).forward( - txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, - spk_embed, spk_id, pitch, infer, tgt_mels, global_step) - if (forward_post_glow or not two_stage) and self.hparams['use_post_flow']: - self.run_post_glow(tgt_mels, infer, is_training, ret) - return ret - - def run_post_glow(self, tgt_mels, infer, is_training, ret): - x_recon = ret['mel_out'].transpose(1, 2) - g = x_recon - B, _, T = g.shape - if self.hparams.get('use_txt_cond', True): - g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1) - if self.hparams.get('use_latent_cond', False): - g_z = ret['z_p'][:, :, :, None].repeat(1, 1, 1, 4).reshape(B, -1, T) - g = torch.cat([g, g_z], 1) - if self.hparams['use_cond_proj']: - g = self.g_proj(g) - prior_dist = self.prior_dist - if not infer: - if is_training: - self.post_flow.train() - nonpadding = ret['nonpadding'].transpose(1, 2) - y_lengths = nonpadding.sum(-1) - if self.hparams['detach_postflow_input']: - g = g.detach() - tgt_mels = tgt_mels.transpose(1, 2) - z_postflow, ldj = self.post_flow(tgt_mels, nonpadding, g=g) - ldj = ldj / y_lengths / 80 - ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj - ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean() - if torch.isnan(ret['postflow']): - ret['postflow'] = None - else: - nonpadding = torch.ones_like(x_recon[:, :1, :]) - z_post = torch.randn(x_recon.shape).to(g.device) * self.hparams['noise_scale'] - x_recon, _ = self.post_flow(z_post, nonpadding, g, reverse=True) - ret['mel_out'] = x_recon.transpose(1, 2) diff --git a/spaces/Yuliang/ICON/app.py b/spaces/Yuliang/ICON/app.py deleted file mode 100644 index f528aa27b29b1f15645060c6c49432234939f364..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ICON/app.py +++ /dev/null @@ -1,156 +0,0 @@ -# install - - -import glob -import gradio as gr -import os -import numpy as np - -import subprocess - -if os.getenv('SYSTEM') == 'spaces': - subprocess.run('pip install pyembree'.split()) - subprocess.run( - 'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split()) - 
subprocess.run( - 'pip install https://download.is.tue.mpg.de/icon/HF/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl'.split()) - subprocess.run( - 'pip install https://download.is.tue.mpg.de/icon/HF/pytorch3d-0.7.0-cp38-cp38-linux_x86_64.whl'.split()) - subprocess.run( - 'pip install git+https://github.com/YuliangXiu/neural_voxelization_layer.git'.split()) - -from apps.infer import generate_model - -# running - -description = ''' -# ICON Clothed Human Digitization -### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022) - - - - -
- -Twitter Follow - -YouTube Video Views - - -
- -

The reconstruction + refinement + video take about 200 seconds for a single image.<br>
ICON is only suitable for humanoid images and will not work well on cartoons with non-human shapes.

- -
- -More - -#### Citation -``` -@inproceedings{xiu2022icon, - title = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals}, - author = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.}, - booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, - month = {June}, - year = {2022}, - pages = {13296-13306} -} -``` - -#### Acknowledgments: - -- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/) -- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu) -- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization) - -#### Image Credits - -* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox) - -#### Related works - -* [ICON @ MPI](https://icon.is.tue.mpg.de/) -* [MonoPort @ USC](https://xiuyuliang.cn/monoport) -* [Phorhum @ Google](https://phorhum.github.io/) -* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/) -* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html) - -
-''' - - -def generate_image(seed, psi): - iface = gr.Interface.load("spaces/hysts/StyleGAN-Human") - img = iface(seed, psi) - return img - - -model_types = ['ICON', 'PIFu', 'PaMIR'] -examples_names = glob.glob('examples/*.png') -examples_types = np.random.choice( - model_types, len(examples_names), p=[0.6, 0.2, 0.2]) - -examples = [list(item) for item in zip(examples_names, examples_types)] - -with gr.Blocks() as demo: - gr.Markdown(description) - - out_lst = [] - with gr.Row(): - with gr.Column(): - with gr.Row(): - with gr.Column(): - seed = gr.inputs.Slider( - 0, 1000, step=1, default=0, label='Seed (For Image Generation)') - psi = gr.inputs.Slider( - 0, 2, step=0.05, default=0.7, label='Truncation psi (For Image Generation)') - radio_choice = gr.Radio( - model_types, label='Method (For Reconstruction)', value='icon-filter') - inp = gr.Image(type="filepath", label="Input Image") - with gr.Row(): - btn_sample = gr.Button("Generate Image") - btn_submit = gr.Button("Submit Image") - - gr.Examples(examples=examples, - inputs=[inp, radio_choice], - cache_examples=False, - fn=generate_model, - outputs=out_lst) - - out_vid = gr.Video( - label="Image + Normal + SMPL Body + Clothed Human") - out_vid_download = gr.File( - label="Download Video, welcome share on Twitter with #ICON") - - with gr.Column(): - overlap_inp = gr.Image( - type="filepath", label="Image Normal Overlap") - out_final = gr.Model3D( - clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human") - out_final_download = gr.File( - label="Download clothed human mesh") - out_smpl = gr.Model3D( - clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL body") - out_smpl_download = gr.File(label="Download SMPL body mesh") - out_smpl_npy_download = gr.File(label="Download SMPL params") - - out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download, - out_final, out_final_download, out_vid, out_vid_download, overlap_inp] - - btn_submit.click(fn=generate_model, inputs=[ - inp, radio_choice], outputs=out_lst) - btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp) - -if __name__ == "__main__": - - # demo.launch(debug=False, enable_queue=False, - # auth=(os.environ['USER'], os.environ['PASSWORD']), - # auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.") - - demo.launch(debug=True, enable_queue=True) diff --git a/spaces/ZeroTwo3/WavJourney/parse_voice.py b/spaces/ZeroTwo3/WavJourney/parse_voice.py deleted file mode 100644 index 9583f402cfb23aede18d421befd2508633b1d23c..0000000000000000000000000000000000000000 --- a/spaces/ZeroTwo3/WavJourney/parse_voice.py +++ /dev/null @@ -1,31 +0,0 @@ -import os -import argparse -from VoiceParser.model import VoiceParser - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--wav-path', type=str, help="Path of a wav file") - parser.add_argument('--wav-dir', type=str, help="Directory of wav files") - parser.add_argument('--out-dir', type=str, help="Directory of output npz files") - args = parser.parse_args() - - if (args.wav_path is None and args.wav_dir is None) or (args.wav_path is not None and args.wav_dir is not None): - parser.error("Please provide either '--wav-path' or '--wav-dir', but not both.") - - out_dir = args.out_dir - - model = VoiceParser(device='cpu') - - if args.wav_path is not None: - model.extract_acoustic_embed(args.wav_path, out_dir) - print(f'Sucessfully parsed {args.wav_path}') - else: - wav_name_list = os.listdir(args.wav_dir) - for wav_name in wav_name_list: - wav_path = os.path.join(args.wav_dir, wav_name) - 
model.extract_acoustic_embed(wav_path, out_dir) - print(f'Sucessfully parsed {wav_path}') - - -if __name__ == '__main__': - main() diff --git a/spaces/Zwicky18/vits-models/modules.py b/spaces/Zwicky18/vits-models/modules.py deleted file mode 100644 index 56ea4145eddf19dd330a3a41ab0183efc1686d83..0000000000000000000000000000000000000000 --- a/spaces/Zwicky18/vits-models/modules.py +++ /dev/null @@ -1,388 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = 
self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for 
c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = 
torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/a-v-bely/russian-task-generator/README.md b/spaces/a-v-bely/russian-task-generator/README.md deleted file mode 100644 index 1d661d5fdd019d0cb5dce142430e3f12048c002a..0000000000000000000000000000000000000000 --- a/spaces/a-v-bely/russian-task-generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Russian Task Generator -emoji: 📚 -colorFrom: pink -colorTo: yellow -sdk: streamlit -sdk_version: 1.20.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/abdulsamod/crop_yield/README.md b/spaces/abdulsamod/crop_yield/README.md deleted file mode 100644 index 2affd3f4903737b4c089517cf1f9242620296fdc..0000000000000000000000000000000000000000 --- a/spaces/abdulsamod/crop_yield/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Crop Yield -emoji: 🦀 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/adding-column-description.md b/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/adding-column-description.md deleted file mode 100644 index fb7bf34f6e7d8853467592f55dcb3c743c030b68..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/adding-column-description.md +++ /dev/null @@ -1,200 +0,0 @@ -# Adding Description on Columns - -## Why Would You Add Description on Columns? -Adding column descriptions(documentation) to a dataset can provide crucial context for understanding the data and its variables. This can aid in data exploration, cleaning, and analysis, as well as ensure that others can understand the data if it is shared or used in collaboration. 
Additionally, column descriptions can help prevent errors and misunderstandings by clearly defining the meaning and units of measurement for each variable. - -### Goal Of This Guide -This guide will show you how to add a description to `user_name `column of a dataset `fct_users_deleted`. - - -## Prerequisites -For this tutorial, you need to deploy DataHub Quickstart and ingest sample data. -For detailed steps, please refer to [Prepare Local DataHub Environment](/docs/api/tutorials/references/prepare-datahub.md). - -:::note -Before adding a description, you need to ensure the targeted dataset is already present in your datahub. -If you attempt to manipulate entities that do not exist, your operation will fail. -In this guide, we will be using data from sample ingestion. -::: - -In this example, we will add a description to `user_name `column of a dataset `fct_users_deleted`. - -## Add Description With GraphQL - -:::note -Please note that there are two available endpoints (`:8000`, `:9002`) to access GraphQL. -For more information about the differences between these endpoints, please refer to [DataHub Metadata Service](../../../metadata-service/README.md#graphql-api) -::: - -### GraphQL Explorer -GraphQL Explorer is the fastest way to experiment with GraphQL without any dependencies. -Navigate to GraphQL Explorer (`http://localhost:9002/api/graphiql`) and run the following query. - -```json -mutation updateDescription { - updateDescription( - input: { - description: "Name of the user who was deleted. This description is updated via GrpahQL.", - resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)", - subResource: "user_name", - subResourceType:DATASET_FIELD - } - ) -} -``` - -Note that you can use general markdown in `description`. For example, you can do the following. - -```json -mutation updateDescription { - updateDescription( - input: { - description: """ - ### User Name - The `user_name` column is a primary key column that contains the name of the user who was deleted. - """, - resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)", - subResource: "user_name", - subResourceType:DATASET_FIELD - } - ) -} -``` - -`updateDescription` currently only supports Dataset Schema Fields, Containers. -For more information about the `updateDescription` mutation, please refer to [updateLineage](https://datahubproject.io/docs/graphql/mutations/#updateDescription). - - -If you see the following response, the operation was successful: -```python -{ - "data": { - "updateDescription": true - }, - "extensions": {} -} -``` - -### CURL - -With CURL, you need to provide tokens. To generate a token, please refer to [Generate Access Token](/docs/api/tutorials/references/generate-access-token.md). -With `accessToken`, you can run the following command. - -```shell -curl --location --request POST 'http://localhost:8080/api/graphql' \ ---header 'Authorization: Bearer ' \ ---header 'Content-Type: application/json' \ ---data-raw '{ "query": "mutation updateDescription { updateDescription ( input: { description: \"Name of the user who was deleted. 
This description is updated via GrpahQL.\", resourceUrn: \"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)\", subResource: \"user_name\", subResourceType:DATASET_FIELD }) }", "variables":{}}' -``` -Expected Response: -```json -{"data":{"updateDescription":true},"extensions":{}} -``` - - -## Add Description With Python SDK -Following code add a description to `user_name `column of a dataset `fct_users_deleted`. - -```python -import logging -import time - -from datahub.emitter.mce_builder import make_dataset_urn -from datahub.emitter.mcp import MetadataChangeProposalWrapper - -# read-modify-write requires access to the DataHubGraph (RestEmitter is not enough) -from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph - -# Imports for metadata model classes -from datahub.metadata.schema_classes import ( - AuditStampClass, - InstitutionalMemoryClass, - EditableSchemaMetadataClass, - EditableSchemaFieldInfoClass, -) - -log = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO) - -def get_simple_field_path_from_v2_field_path(field_path: str) -> str: - """A helper function to extract simple . path notation from the v2 field path""" - if not field_path.startswith("[version=2.0]"): - # not a v2, we assume this is a simple path - return field_path - # this is a v2 field path - tokens = [ - t for t in field_path.split(".") if not (t.startswith("[") or t.endswith("]")) - ] - - return ".".join(tokens) - -# Inputs -> owner, ownership_type, dataset -documentation_to_add = "Name of the user who was deleted. This description is updated via PythonSDK." -dataset_urn = make_dataset_urn(platform="hive", name="fct_users_deleted", env="PROD") -column = "user_name" -field_info_to_set = EditableSchemaFieldInfoClass( - fieldPath=column, description=documentation_to_add -) - - -# Some helpful variables to fill out objects later -now = int(time.time() * 1000) # milliseconds since epoch -current_timestamp = AuditStampClass(time=now, actor="urn:li:corpuser:ingestion") - - -# First we get the current owners -gms_endpoint = "http://localhost:8080" -graph = DataHubGraph(config=DatahubClientConfig(server=gms_endpoint)) - -current_editable_schema_metadata = graph.get_aspect( - entity_urn=dataset_urn, - aspect_type=EditableSchemaMetadataClass, -) - - -need_write = False - -if current_editable_schema_metadata: - for fieldInfo in current_editable_schema_metadata.editableSchemaFieldInfo: - if get_simple_field_path_from_v2_field_path(fieldInfo.fieldPath) == column: - # we have some editable schema metadata for this field - field_match = True - if documentation_to_add != fieldInfo.description: - fieldInfo.description = documentation_to_add - need_write = True -else: - # create a brand new editable dataset properties aspect - current_editable_schema_metadata = EditableSchemaMetadataClass( - editableSchemaFieldInfo=[field_info_to_set], - created=current_timestamp, - ) - need_write = True - -if need_write: - event: MetadataChangeProposalWrapper = MetadataChangeProposalWrapper( - entityUrn=dataset_urn, - aspect=current_editable_schema_metadata, - ) - graph.emit(event) - log.info(f"Documentation added to dataset {dataset_urn}") - -else: - log.info("Documentation already exists and is identical, omitting write") - - -current_institutional_memory = graph.get_aspect( - entity_urn=dataset_urn, aspect_type=InstitutionalMemoryClass -) - -need_write = False -``` - -We're using the `MetdataChangeProposalWrapper` to change entities in this example. 
-For more information about the `MetadataChangeProposal`, please refer to [MetadataChangeProposal & MetadataChangeLog Events](/docs/advanced/mcp-mcl.md) - - -## Expected Outcomes -You can now see that the column description has been added to the `user_name` column of `fct_users_deleted`. - -![column-description-added](../../imgs/apis/tutorials/column-description-added.png) - diff --git a/spaces/abdvl/datahub_qa_bot/docs/what/graph.md b/spaces/abdvl/datahub_qa_bot/docs/what/graph.md deleted file mode 100644 index 092ba0e7838d5253f85a49bd0e45e3785be2e601..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/what/graph.md +++ /dev/null @@ -1,15 +0,0 @@ -# What is the GMA graph? - -All the [entities](entity.md) and [relationships](relationship.md) are stored in a graph database, Neo4j. -The graph always represents the current state of the world and has no direct support for versioning or history. -However, as stated in the [Metadata Modeling](../modeling/metadata-model.md) section, -the graph is merely a derived view of all metadata [aspects](aspect.md) and thus can always be rebuilt directly from historic [MAEs](mxe.md#metadata-audit-event-mae). -Consequently, it is possible to build a specific snapshot of the graph in time by replaying MAEs up to that point. - -In theory, the system can work with any generic [OLTP](https://en.wikipedia.org/wiki/Online_transaction_processing) graph DB that supports the following operations: -* Dynamic creation, modification, and removal of nodes and edges -* Dynamic attachment of key-value properties to each node and edge -* Transactional partial updates of properties of a specific node or edge -* Fast ID-based retrieval of nodes & edges -* Efficient queries involving both graph traversal and property value filtering -* Efficient bidirectional graph traversal diff --git a/spaces/abhaskumarsinha/MinimalGPT-Felis_Catus/subword/chrF.py b/spaces/abhaskumarsinha/MinimalGPT-Felis_Catus/subword/chrF.py deleted file mode 100644 index 3a35941d61b618a8b32d937b51f0d10071129bd6..0000000000000000000000000000000000000000 --- a/spaces/abhaskumarsinha/MinimalGPT-Felis_Catus/subword/chrF.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Author: Rico Sennrich - -"""Compute chrF3 for machine translation evaluation - -Reference: -Maja Popović (2015). chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392–395, Lisbon, Portugal. 
-""" - -from __future__ import print_function, unicode_literals, division - -import sys -import codecs -import io -import argparse - -from collections import defaultdict - -# hack for python2/3 compatibility -from io import open -argparse.open = open - -def create_parser(): - parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description="learn BPE-based word segmentation") - - parser.add_argument( - '--ref', '-r', type=argparse.FileType('r'), required=True, - metavar='PATH', - help="Reference file") - parser.add_argument( - '--hyp', type=argparse.FileType('r'), metavar='PATH', - default=sys.stdin, - help="Hypothesis file (default: stdin).") - parser.add_argument( - '--beta', '-b', type=float, default=3, - metavar='FLOAT', - help="beta parameter (default: '%(default)s')") - parser.add_argument( - '--ngram', '-n', type=int, default=6, - metavar='INT', - help="ngram order (default: '%(default)s')") - parser.add_argument( - '--space', '-s', action='store_true', - help="take spaces into account (default: '%(default)s')") - parser.add_argument( - '--precision', action='store_true', - help="report precision (default: '%(default)s')") - parser.add_argument( - '--recall', action='store_true', - help="report recall (default: '%(default)s')") - - return parser - -def extract_ngrams(words, max_length=4, spaces=False): - - if not spaces: - words = ''.join(words.split()) - else: - words = words.strip() - - results = defaultdict(lambda: defaultdict(int)) - for length in range(max_length): - for start_pos in range(len(words)): - end_pos = start_pos + length + 1 - if end_pos <= len(words): - results[length][tuple(words[start_pos: end_pos])] += 1 - return results - - -def get_correct(ngrams_ref, ngrams_test, correct, total): - - for rank in ngrams_test: - for chain in ngrams_test[rank]: - total[rank] += ngrams_test[rank][chain] - if chain in ngrams_ref[rank]: - correct[rank] += min(ngrams_test[rank][chain], ngrams_ref[rank][chain]) - - return correct, total - - -def f1(correct, total_hyp, total_ref, max_length, beta=3, smooth=0): - - precision = 0 - recall = 0 - - for i in range(max_length): - if total_hyp[i] + smooth and total_ref[i] + smooth: - precision += (correct[i] + smooth) / (total_hyp[i] + smooth) - recall += (correct[i] + smooth) / (total_ref[i] + smooth) - - precision /= max_length - recall /= max_length - - return (1 + beta**2) * (precision*recall) / ((beta**2 * precision) + recall), precision, recall - -def main(args): - - correct = [0]*args.ngram - total = [0]*args.ngram - total_ref = [0]*args.ngram - for line in args.ref: - line2 = args.hyp.readline() - - ngrams_ref = extract_ngrams(line, max_length=args.ngram, spaces=args.space) - ngrams_test = extract_ngrams(line2, max_length=args.ngram, spaces=args.space) - - get_correct(ngrams_ref, ngrams_test, correct, total) - - for rank in ngrams_ref: - for chain in ngrams_ref[rank]: - total_ref[rank] += ngrams_ref[rank][chain] - - chrf, precision, recall = f1(correct, total, total_ref, args.ngram, args.beta) - - print('chrF3: {0:.4f}'.format(chrf)) - if args.precision: - print('chrPrec: {0:.4f}'.format(precision)) - if args.recall: - print('chrRec: {0:.4f}'.format(recall)) - -if __name__ == '__main__': - - # python 2/3 compatibility - if sys.version_info < (3, 0): - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin) - else: - sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8') - sys.stderr = 
io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8') - sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', write_through=True, line_buffering=True) - - parser = create_parser() - args = parser.parse_args() - - main(args) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/backbones/darknet.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/backbones/darknet.py deleted file mode 100644 index 517fe26259217792e0dad80ca3824d914cfe3904..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/backbones/darknet.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) 2019 Western Digital Corporation or its affiliates. - -import logging - -import torch.nn as nn -from mmcv.cnn import ConvModule, constant_init, kaiming_init -from mmcv.runner import load_checkpoint -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES - - -class ResBlock(nn.Module): - """The basic residual block used in Darknet. Each ResBlock consists of two - ConvModules and the input is added to the final output. Each ConvModule is - composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer - has half of the number of the filters as much as the second convLayer. The - first convLayer has filter size of 1x1 and the second one has the filter - size of 3x3. - - Args: - in_channels (int): The input channels. Must be even. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - """ - - def __init__(self, - in_channels, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1)): - super(ResBlock, self).__init__() - assert in_channels % 2 == 0 # ensure the in_channels is even - half_in_channels = in_channels // 2 - - # shortcut - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - - self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg) - self.conv2 = ConvModule( - half_in_channels, in_channels, 3, padding=1, **cfg) - - def forward(self, x): - residual = x - out = self.conv1(x) - out = self.conv2(out) - out = out + residual - - return out - - -@BACKBONES.register_module() -class Darknet(nn.Module): - """Darknet backbone. - - Args: - depth (int): Depth of Darknet. Currently only support 53. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. Default: -1. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - - Example: - >>> from mmdet.models import Darknet - >>> import torch - >>> self = Darknet(depth=53) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 416, 416) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... print(tuple(level_out.shape)) - ... 
- (1, 256, 52, 52) - (1, 512, 26, 26) - (1, 1024, 13, 13) - """ - - # Dict(depth: (layers, channels)) - arch_settings = { - 53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512), - (512, 1024))) - } - - def __init__(self, - depth=53, - out_indices=(3, 4, 5), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - norm_eval=True): - super(Darknet, self).__init__() - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for darknet') - self.depth = depth - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.layers, self.channels = self.arch_settings[depth] - - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - - self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg) - - self.cr_blocks = ['conv1'] - for i, n_layers in enumerate(self.layers): - layer_name = f'conv_res_block{i + 1}' - in_c, out_c = self.channels[i] - self.add_module( - layer_name, - self.make_conv_res_block(in_c, out_c, n_layers, **cfg)) - self.cr_blocks.append(layer_name) - - self.norm_eval = norm_eval - - def forward(self, x): - outs = [] - for i, layer_name in enumerate(self.cr_blocks): - cr_block = getattr(self, layer_name) - x = cr_block(x) - if i in self.out_indices: - outs.append(x) - - return tuple(outs) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - - else: - raise TypeError('pretrained must be a str or None') - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for i in range(self.frozen_stages): - m = getattr(self, self.cr_blocks[i]) - m.eval() - for param in m.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super(Darknet, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() - - @staticmethod - def make_conv_res_block(in_channels, - out_channels, - res_repeat, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', - negative_slope=0.1)): - """In Darknet backbone, ConvLayer is usually followed by ResBlock. This - function will make that. The Conv layers always have 3x3 filters with - stride=2. The number of the filters in Conv layer is the same as the - out channels of the ResBlock. - - Args: - in_channels (int): The number of input channels. - out_channels (int): The number of output channels. - res_repeat (int): The number of ResBlocks. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). 
- """ - - cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg) - - model = nn.Sequential() - model.add_module( - 'conv', - ConvModule( - in_channels, out_channels, 3, stride=2, padding=1, **cfg)) - for idx in range(res_repeat): - model.add_module('res{}'.format(idx), - ResBlock(out_channels, **cfg)) - return model diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/pisa_retinanet_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/pisa_retinanet_head.py deleted file mode 100644 index bd87b9aeb07e05ff94b444ac8999eca3f616711a..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/pisa_retinanet_head.py +++ /dev/null @@ -1,154 +0,0 @@ -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import images_to_levels -from ..builder import HEADS -from ..losses import carl_loss, isr_p -from .retina_head import RetinaHead - - -@HEADS.register_module() -class PISARetinaHead(RetinaHead): - """PISA Retinanet Head. - - The head owns the same structure with Retinanet Head, but differs in two - aspects: - 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to - change the positive loss weights. - 2. Classification-aware regression loss is adopted as a third loss. - """ - - @force_fp32(apply_to=('cls_scores', 'bbox_preds')) - def loss(self, - cls_scores, - bbox_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes of each image - with shape (num_obj, 4). - gt_labels (list[Tensor]): Ground truth labels of each image - with shape (num_obj, 4). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image. - Default: None. - - Returns: - dict: Loss dict, comprise classification loss, regression loss and - carl loss. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - return_sampling_results=True) - if cls_reg_targets is None: - return None - (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, - num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets - num_total_samples = ( - num_total_pos + num_total_neg if self.sampling else num_total_pos) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - # concat all level anchors and flags to a single tensor - concat_anchor_list = [] - for i in range(len(anchor_list)): - concat_anchor_list.append(torch.cat(anchor_list[i])) - all_anchor_list = images_to_levels(concat_anchor_list, - num_level_anchors) - - num_imgs = len(img_metas) - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels) - for cls_score in cls_scores - ] - flatten_cls_scores = torch.cat( - flatten_cls_scores, dim=1).reshape(-1, - flatten_cls_scores[0].size(-1)) - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) - for bbox_pred in bbox_preds - ] - flatten_bbox_preds = torch.cat( - flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1)) - flatten_labels = torch.cat(labels_list, dim=1).reshape(-1) - flatten_label_weights = torch.cat( - label_weights_list, dim=1).reshape(-1) - flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4) - flatten_bbox_targets = torch.cat( - bbox_targets_list, dim=1).reshape(-1, 4) - flatten_bbox_weights = torch.cat( - bbox_weights_list, dim=1).reshape(-1, 4) - - # Apply ISR-P - isr_cfg = self.train_cfg.get('isr', None) - if isr_cfg is not None: - all_targets = (flatten_labels, flatten_label_weights, - flatten_bbox_targets, flatten_bbox_weights) - with torch.no_grad(): - all_targets = isr_p( - flatten_cls_scores, - flatten_bbox_preds, - all_targets, - flatten_anchors, - sampling_results_list, - bbox_coder=self.bbox_coder, - loss_cls=self.loss_cls, - num_class=self.num_classes, - **self.train_cfg.isr) - (flatten_labels, flatten_label_weights, flatten_bbox_targets, - flatten_bbox_weights) = all_targets - - # For convenience we compute loss once instead separating by fpn level, - # so that we don't need to separate the weights by level again. 
- # The result should be the same - losses_cls = self.loss_cls( - flatten_cls_scores, - flatten_labels, - flatten_label_weights, - avg_factor=num_total_samples) - losses_bbox = self.loss_bbox( - flatten_bbox_preds, - flatten_bbox_targets, - flatten_bbox_weights, - avg_factor=num_total_samples) - loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox) - - # CARL Loss - carl_cfg = self.train_cfg.get('carl', None) - if carl_cfg is not None: - loss_carl = carl_loss( - flatten_cls_scores, - flatten_labels, - flatten_bbox_preds, - flatten_bbox_targets, - self.loss_bbox, - **self.train_cfg.carl, - avg_factor=num_total_pos, - sigmoid=True, - num_class=self.num_classes) - loss_dict.update(loss_carl) - - return loss_dict diff --git a/spaces/aimaswx/my_streamchat/app.py b/spaces/aimaswx/my_streamchat/app.py deleted file mode 100644 index 84ff5fd1dcdde4b45aad87113226c47d056c01d7..0000000000000000000000000000000000000000 --- a/spaces/aimaswx/my_streamchat/app.py +++ /dev/null @@ -1,34 +0,0 @@ -# chat_bot.py - -import openai -import streamlit as st -from streamlit_chat import message - -#申请的api_key -openai.api_key = "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" -def generate_response(prompt): - completion = openai.Completion.create( - model="text-davinci-003", - prompt=prompt, - max_tokens=1024, - temperature=0 - ) - message=completion.choices[0].text - return message - -st.markdown("#### 我是ChatGPT聊天机器人,我可以回答您的任何问题!") -if 'generated' not in st.session_state: - st.session_state['generated'] = [] -if 'past' not in st.session_state: - st.session_state['past'] = [] -user_input=st.text_input("请输入您的问题:",key='input') -if user_input: - output=generate_response(user_input) - st.session_state['past'].append(user_input) - st.session_state['generated'].append(output) -if st.session_state['generated']: - for i in range(len(st.session_state['generated'])-1, -1, -1): - message(st.session_state["generated"][i], key=str(i)) - message(st.session_state['past'][i], - is_user=True, - key=str(i)+'_user') \ No newline at end of file diff --git a/spaces/akhaliq/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/runROUGE-test.pl b/spaces/akhaliq/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/runROUGE-test.pl deleted file mode 100644 index ef36a278a9630fc182b89c997e7c9ff0c827a65d..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/runROUGE-test.pl +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/perl -w -use Cwd; -$curdir=getcwd; -$ROUGE="../ROUGE-1.5.5.pl"; -chdir("sample-test"); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-a.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a -m ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-a-m.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a -m -s ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-a-m-s.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -l 10 -a ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-l10-a.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -l 10 -a -m ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-l10-a-m.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 
4 -w 1.2 -l 10 -a -m -s ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-l10-a-m-s.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -b 75 -a ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-b75-a.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -b 75 -a -m ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-b75-a-m.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -b 75 -a -m -s ROUGE-test.xml > ../sample-output/ROUGE-test-c95-2-1-U-r1000-n4-w1.2-b75-a-m-s.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -3 HM -z SIMPLE DUC2002-BE-F.in.26.lst 26 > ../sample-output/DUC2002-BE-F.in.26.lst.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -3 HM DUC2002-BE-F.in.26.simple.xml 26 > ../sample-output/DUC2002-BE-F.in.26.simple.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -3 HM -z SIMPLE DUC2002-BE-L.in.26.lst 26 > ../sample-output/DUC2002-BE-L.in.26.lst.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -3 HM DUC2002-BE-L.in.26.simple.xml 26 > ../sample-output/DUC2002-BE-L.in.26.simple.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -n 4 -z SPL DUC2002-ROUGE.in.26.spl.lst 26 > ../sample-output/DUC2002-ROUGE.in.26.spl.lst.out"; -print $cmd,"\n"; -system($cmd); -$cmd="$ROUGE -e ../data -n 4 DUC2002-ROUGE.in.26.spl.xml 26 > ../sample-output/DUC2002-ROUGE.in.26.spl.out"; -print $cmd,"\n"; -system($cmd); -chdir($curdir); diff --git a/spaces/akhaliq/deeplab2/model/utils.py b/spaces/akhaliq/deeplab2/model/utils.py deleted file mode 100644 index b28a19ea3b18c8eff5039a2c6eb2270e197c8a20..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/model/utils.py +++ /dev/null @@ -1,485 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""This file contains utility functions for the model code.""" - -from typing import Any, List, MutableMapping, MutableSequence, Optional, Set - -import tensorflow as tf - -from deeplab2 import common -from deeplab2 import config_pb2 - -layers = tf.keras.layers - -_PREDICTION_WITH_NEAREST_UPSAMPLING = ( - common.PRED_INSTANCE_KEY, - common.PRED_INSTANCE_CENTER_KEY, - common.PRED_INSTANCE_SCORES_KEY, - common.PRED_PANOPTIC_KEY, - common.PRED_SEMANTIC_KEY, - common.PRED_NEXT_PANOPTIC_KEY, - common.PRED_CONCAT_NEXT_PANOPTIC_KEY, - common.PRED_CENTER_HEATMAP_KEY, -) - -_PREDICTION_WITH_BILINEAR_UPSAMPLING = ( - common.PRED_SEMANTIC_PROBS_KEY, - common.PRED_OFFSET_MAP_KEY, -) - -_INPUT_WITH_NEAREST_UPSAMPLING = ( - common.GT_INSTANCE_CENTER_KEY, -) - -_INPUT_WITH_BILINEAR_UPSAMPLING = ( - common.IMAGE, - common.GT_INSTANCE_REGRESSION_KEY -) - - -def _scale_helper(value, scale): - if isinstance(value, tf.Tensor): - return tf.cast( - (tf.cast(value, dtype=tf.float32) - 1.0) * scale + 1.0, - dtype=tf.int32) - else: - return int((float(value) - 1.0) * scale + 1.0) - - -def scale_mutable_sequence(input_sequence: MutableSequence[int], - scale: float) -> MutableSequence[int]: - return [_scale_helper(x, scale) for x in input_sequence] - - -def scale_int_list(int_list, scale): - return [int(x * scale) for x in int_list] - - -def undo_image_preprocessing(image_in: tf.Tensor, method: str, - perform_crop: bool, - regions_to_crop: List[int], - output_shape: List[int]) -> tf.Tensor: - """Undoes the image preprocessing. - - In particular, this function slices out the valid regions (determined by - `regions_to_crop`) in the input when perform_crop is True. After - that, we resize the results to the desired `output_shape`. - - Args: - image_in: Input image Tensor with shape [batch, height, width, n_channels]. - method: Image resize method. - perform_crop: Boolean, performing crop or not. - regions_to_crop: The regions to crop [height, width]. Will only apply - cropping at the bottom right. - output_shape: Desired shape after resizing [height, width]. - - Returns: - Outputs after cropping (if perform_crop = True) and resizing. - """ - if perform_crop: - image_out = image_in[ - :, :regions_to_crop[0], :regions_to_crop[1], :] - else: - image_out = image_in - return resize_align_corners(image_out, output_shape, method=method) - - -def undo_preprocessing(input_or_prediction_dict: MutableMapping[str, Any], - regions_to_crop: List[int], - output_shape: List[int]) -> MutableMapping[str, Any]: - """Undoes preprocessing for predictions. - - Args: - input_or_prediction_dict: A dictionary storing different types of inputs or - predictions. - regions_to_crop: The regions to crop [height, width]. Will only apply - cropping at the bottom right. - output_shape: Desired shape after resizing [height, width]. - - Returns: - inputs or predictions after cropping (if perform_crop = True) and resizing. 
- """ - for key in input_or_prediction_dict.keys(): - if key in _PREDICTION_WITH_NEAREST_UPSAMPLING or key in _INPUT_WITH_NEAREST_UPSAMPLING: - input_or_prediction_dict[key] = tf.squeeze( - undo_image_preprocessing( - tf.expand_dims(input_or_prediction_dict[key], 3), - 'nearest', - perform_crop=True, - regions_to_crop=regions_to_crop, - output_shape=output_shape), - axis=3) - elif key in _PREDICTION_WITH_BILINEAR_UPSAMPLING or key in _INPUT_WITH_BILINEAR_UPSAMPLING: - input_or_prediction_dict[key] = undo_image_preprocessing( - input_or_prediction_dict[key], - 'bilinear', - perform_crop=True, - regions_to_crop=regions_to_crop, - output_shape=output_shape) - else: - # We only undo preprocessing for those defined in - # _{PREDICTION,INPUT}_WITH_{NEAREST,BILINEAR}_UPSAMPLING. - # Other intermediate results are skipped. - continue - return input_or_prediction_dict - - -def add_zero_padding(input_tensor: tf.Tensor, kernel_size: int, - rank: int) -> tf.Tensor: - """Adds zero-padding to the input_tensor.""" - pad_total = kernel_size - 1 - pad_begin = pad_total // 2 - pad_end = pad_total - pad_begin - if rank == 3: - return tf.pad( - input_tensor, - paddings=[[pad_begin, pad_end], [pad_begin, pad_end], [0, 0]]) - else: - return tf.pad( - input_tensor, - paddings=[[0, 0], [pad_begin, pad_end], [pad_begin, pad_end], [0, 0]]) - - -def resize_and_rescale_offsets(input_tensor: tf.Tensor, target_size): - """Bilinearly resizes and rescales the offsets. - - Args: - input_tensor: A tf.Tensor of shape [batch, height, width, 2]. - target_size: A list or tuple or 1D tf.Tensor that specifies the height and - width after resizing. - - Returns: - The input_tensor resized to shape `[batch, target_height, target_width, 2]`. - Moreover, the offsets along the y-axis are rescaled by a factor equal to - (target_height - 1) / (reference_height - 1) and the offsets along the - x-axis are rescaled by a factor equal to - (target_width - 1) / (reference_width - 1). - """ - input_size_y = tf.shape(input_tensor)[1] - input_size_x = tf.shape(input_tensor)[2] - - scale_y = tf.cast(target_size[0] - 1, tf.float32) / tf.cast( - input_size_y - 1, tf.float32) - scale_x = tf.cast(target_size[1] - 1, tf.float32) / tf.cast( - input_size_x - 1, tf.float32) - - target_y, target_x = tf.split( - value=input_tensor, num_or_size_splits=2, axis=3) - target_y *= scale_y - target_x *= scale_x - target = tf.concat([target_y, target_x], 3) - return resize_bilinear(target, target_size) - - -def resize_align_corners(input_tensor, target_size, method='bilinear'): - """Resizes the input_tensor to target_size. - - This returns the same output as tf.compat.v1.image.resize(input_tensor, - target_size, align_corners=True). - - Args: - input_tensor: A tf.Tensor of shape [batch, height, width, channels]. - target_size: A list or tuple or 1D tf.Tensor that specifies the height and - width after resizing. - method: An optional string specifying the method used for resizing. - Supported options are 'nearest' and 'bilinear'. - - Returns: - The resized tensor. - - Raises: - ValueError: An error occurs if 1) the input tensor's rank is not 4 or 2) the - resizing method is not supported. - """ - if method == 'bilinear': - tf_method = tf.compat.v1.image.ResizeMethod.BILINEAR - elif method == 'nearest': - tf_method = tf.compat.v1.image.ResizeMethod.NEAREST_NEIGHBOR - else: - raise ValueError('The given method %s is not supported. Please use bilinear' - ' or nearest.' 
% method) - - tf.debugging.assert_rank( - input_tensor, 4, - message='Input tensor to resize method should have rank of 4.') - - return tf.compat.v1.image.resize( - input_tensor, - target_size, - method=tf_method, - align_corners=True, - name='resize_align_corners') - - -def resize_bilinear(images, - size, - align_corners=True, - name=None): - """TPU memory efficient version of tf.compat.v1.image.resize_bilinear. - - ResizeBilinear on TPU requires padded batch and channel dimensions. On a - TPUv3, the worst case could lead to 256x memory consumption, if the - input is, for example, [1, 257, 513, 1]. In this function, we replace the - default resize_bilinear by two resize_bilinear operations, which put one image - axis on the channel axis. This reduces TPU padding when batch * channel is - small and height * width is large. - - Args: - images: Input image of shape [B, H, W, C]. - size: A list of two elements: [height, width]. The new size for the images. - align_corners: Whether to align corners of the image. - name: Name of the operation. - - Returns: - Resized image. - """ - _, height, width, channel = images.get_shape().as_list() - if height == size[0] and width == size[1]: - return images - dtype = images.dtype - images = tf.cast(images, tf.float32) - # We check the channel axis only since the batch size is similar (usually 1 or - # 2). In this way, this if-else easily supports dynamic batch size without - # using tf.cond(). - if channel > 32 or not align_corners: - images = tf.compat.v1.image.resize_bilinear( - images, size, - align_corners=align_corners, - name=name) - else: - images = tf.transpose(images, [0, 3, 1, 2]) - images = tf.compat.v1.image.resize_bilinear( - images, [channel, size[0]], - align_corners=align_corners, - name=name + '_height' if name else None) - images = tf.transpose(images, [0, 1, 3, 2]) - images = tf.compat.v1.image.resize_bilinear( - images, [channel, size[1]], - align_corners=align_corners, - name=name + '_width' if name else None) - images = tf.transpose(images, [0, 3, 2, 1]) - return tf.cast(images, dtype) - - -def make_divisible(value: float, - divisor: int, - min_value: Optional[float] = None) -> int: - """Ensures all layers have channels that are divisible by the divisor. - - Args: - value: A `float` of original value. - divisor: An `int` of the divisor that needs to be checked upon. - min_value: A `float` of minimum value threshold. - - Returns: - The adjusted value in `int` that is divisible by divisor. - - Raises: - ValueError: Minimual value should be divisible by divisor. - """ - if min_value is None: - min_value = divisor - elif min_value % divisor != 0: - raise ValueError('Minimual value should be divisible by divisor.') - - new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) - # Make sure that round down does not go down by more than 10%. - if new_value < 0.9 * value: - new_value += divisor - return int(new_value) - - -def transpose_and_reshape_for_attention_operation(inputs): - """Sequentially transposes and reshapes the tensor. - - Args: - inputs: An input [batch, num_heads, length, channel] tensor. - - Returns: - output: An output [batch, length, num_heads * channel] tensor. - """ - _, num_heads, length, channel = inputs.get_shape().as_list() - transposed_inputs = tf.transpose(inputs, [0, 2, 1, 3]) - return tf.reshape(transposed_inputs, [-1, length, num_heads * channel]) - - -def reshape_and_transpose_for_attention_operation(inputs, num_heads): - """Sequentially reshapes and transposes the tensor. 
- - Args: - inputs: An input [batch, length, num_heads * channel] tensor. - num_heads: An integer, the number of attention heads. - - Returns: - output: An output [batch, num_heads, length, channel] tensor. - """ - _, length, channels = inputs.get_shape().as_list() - inputs = tf.reshape(inputs, [-1, length, num_heads, channels // num_heads]) - return tf.transpose(inputs, [0, 2, 1, 3]) - - -def get_layer_name(private_attribute_name): - if private_attribute_name[0] != '_': - raise ValueError('Private attribute name should start with a \'_\'.') - return private_attribute_name[1:] - - -def get_stem_current_name(index): - return '_basic_block{}'.format(index + 1) - - -def get_low_level_conv_fusion_conv_current_names(index): - return ('_low_level_conv{}'.format(index + 1), - '_fusion_conv{}'.format(index + 1)) - - -def get_conv_bn_act_current_name(index, use_bn, activation): - name = '_conv{}'.format(index + 1) - if use_bn: - name += '_bn' - if (activation is not None and - activation.lower() != 'none' and - activation.lower() != 'linear'): - name += '_act' - return name - - -def safe_setattr(obj, name, value): - """A conflict-safe version of setattr(). - - Different from setattr(), this function raises ValueError if the object - already has an attribute with the same name. - - Args: - obj: An object whose attribute has to be set. - name: A string, the name of the attribute. - value: Any type, the value given to the attribute. - - Raises: - ValueError: If the object already has an attribute with the same name. - """ - if hasattr(obj, name): - raise ValueError('The object already has an attribute with the same name.') - setattr(obj, name, value) - - -def pad_sequence_with_none(sequence, target_length): - return list(sequence) + [None] * (target_length - len(sequence)) - - -def strided_downsample(input_tensor, target_size): - """Strided downsamples a tensor to the target size. - - The stride_height and stride_width is computed by (height - 1) // - (target_height - 1) and (width - 1) // (target_width - 1). We raise an error - if stride_height != stride_width, since this is not intended in our current - use cases. But this check can be removed if different strides are desired. - This function supports static shape only. - - Args: - input_tensor: A [batch, height, width] tf.Tensor to be downsampled. - target_size: A list of two integers, [target_height, target_width], the - target size after downsampling. - - Returns: - output_tensor: A [batch, target_height, target_width] tf.Tensor, the - downsampled result. - - Raises: - ValueError: If the input cannot be downsampled with integer stride, i.e., - (height - 1) % (target_height - 1) != 0, or (width - 1) % (target_width - - 1) != 0. - ValueError: If the height axis stride does not equal to the width axis - stride. - """ - input_height, input_width = input_tensor.get_shape().as_list()[1:3] - target_height, target_width = target_size - - if ((input_height - 1) % (target_height - 1) or - (input_width - 1) % (target_width - 1)): - raise ValueError('The input cannot be downsampled with integer striding. 
' - 'Please ensure (height - 1) % (target_height - 1) == 0 ' - 'and (width - 1) % (target_width - 1) == 0.') - stride_height = (input_height - 1) // (target_height - 1) - stride_width = (input_width - 1) // (target_width - 1) - if stride_height != stride_width: - raise ValueError('The height axis stride does not equal to the width axis ' - 'stride.') - if stride_height > 1 or stride_width > 1: - return input_tensor[:, ::stride_height, ::stride_width] - return input_tensor - - -def get_stuff_class_ids(num_thing_stuff_classes: int, - thing_class_ids: List[int], - void_label: int) -> List[int]: - """Computes stuff_class_ids. - - The stuff_class_ids are computed from the num_thing_stuff_classes, the - thing_class_ids and the void_label. - - Args: - num_thing_stuff_classes: An integer specifying the number of stuff and thing - classes, not including `void` class. - thing_class_ids: A List of integers of length [num_thing_classes] containing - thing class indices. - void_label: An integer specifying the void label. - - Returns: - stuff_class_ids: A sorted List of integers of shape [num_stuff_classes] - containing stuff class indices. - """ - if void_label >= num_thing_stuff_classes: - thing_stuff_class_ids = list(range(num_thing_stuff_classes)) - else: - thing_stuff_class_ids = [_ for _ in range(num_thing_stuff_classes + 1) - if _ is not void_label] - return sorted(set(thing_stuff_class_ids) - set(thing_class_ids)) - - -def get_supported_tasks( - config: config_pb2.ExperimentOptions) -> Set[str]: - """Gets currently supported tasks for each meta_architecture. - - Args: - config: A config_pb2.ExperimentOptions configuration. - - Returns: - supported_tasks: A set of strings (see common.py), optionally - - common.TASK_PANOPTIC_SEGMENTATION, - - common.TASK_INSTANCE_SEGMENTATION, - - common.TASK_VIDEO_PANOPTIC_SEGMENTATION, - """ - supported_tasks = set() - meta_architecture = config.model_options.WhichOneof('meta_architecture') - is_max_deeplab = meta_architecture == 'max_deeplab' - is_motion_deeplab = meta_architecture == 'motion_deeplab' - is_panoptic_deeplab = meta_architecture == 'panoptic_deeplab' - is_vip_deeplab = meta_architecture == 'vip_deeplab' - is_panoptic = ( - (config.model_options.panoptic_deeplab.instance.enable and - is_panoptic_deeplab) or - is_motion_deeplab or is_max_deeplab or is_vip_deeplab) - if is_panoptic: - supported_tasks.add(common.TASK_PANOPTIC_SEGMENTATION) - # MaX-DeepLab does not support evaluating instance segmentation mask AP yet. 
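For reference, a small worked example of how `get_stuff_class_ids` above splits the label space; the concrete numbers follow a Cityscapes-style setup (19 classes, thing ids 11-18, void label 255) and are purely illustrative.

```python
# Worked example for get_stuff_class_ids as defined above (illustrative numbers only).
stuff_ids = get_stuff_class_ids(
    num_thing_stuff_classes=19,
    thing_class_ids=[11, 12, 13, 14, 15, 16, 17, 18],
    void_label=255)                      # void >= 19, so candidate ids are 0..18
assert stuff_ids == list(range(11))      # ids 0..10 remain as stuff classes
```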
- if not is_max_deeplab: - supported_tasks.add(common.TASK_INSTANCE_SEGMENTATION) - if is_motion_deeplab or is_vip_deeplab: - supported_tasks.add(common.TASK_VIDEO_PANOPTIC_SEGMENTATION) - if is_vip_deeplab: - supported_tasks.add(common.TASK_DEPTH_AWARE_VIDEO_PANOPTIC_SEGMENTATION) - return supported_tasks diff --git a/spaces/akhaliq/frame-interpolation/app.py b/spaces/akhaliq/frame-interpolation/app.py deleted file mode 100644 index 9a12e7ea8a1a07e91ccedd5a880cafb0da9e4d1e..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/frame-interpolation/app.py +++ /dev/null @@ -1,97 +0,0 @@ -import os - -os.system("git clone https://github.com/google-research/frame-interpolation") -import sys - -sys.path.append("frame-interpolation") -import numpy as np -import tensorflow as tf -import mediapy -from PIL import Image -from eval import interpolator, util -import gradio as gr - -from huggingface_hub import snapshot_download - -from image_tools.sizes import resize_and_crop - - -def load_model(model_name): - model = interpolator.Interpolator(snapshot_download(repo_id=model_name), None) - - return model - - -model_names = [ - "akhaliq/frame-interpolation-film-style", - "NimaBoscarino/frame-interpolation_film_l1", - "NimaBoscarino/frame_interpolation_film_vgg", -] - -models = {model_name: load_model(model_name) for model_name in model_names} - -ffmpeg_path = util.get_ffmpeg_path() -mediapy.set_ffmpeg(ffmpeg_path) - - -def resize(width, img): - basewidth = width - img = Image.open(img) - wpercent = (basewidth / float(img.size[0])) - hsize = int((float(img.size[1]) * float(wpercent))) - img = img.resize((basewidth, hsize), Image.ANTIALIAS) - return img - - -def resize_img(img1, img2): - img_target_size = Image.open(img1) - img_to_resize = resize_and_crop( - img2, - (img_target_size.size[0], img_target_size.size[1]), # set width and height to match img1 - crop_origin="middle" - ) - img_to_resize.save('resized_img2.png') - - -def predict(frame1, frame2, times_to_interpolate, model_name): - model = models[model_name] - - frame1 = resize(256, frame1) - frame2 = resize(256, frame2) - - frame1.save("test1.png") - frame2.save("test2.png") - - resize_img("test1.png", "test2.png") - input_frames = ["test1.png", "resized_img2.png"] - - frames = list( - util.interpolate_recursively_from_files( - input_frames, times_to_interpolate, model)) - - mediapy.write_video("out.mp4", frames, fps=30) - return "out.mp4" - - -title = "frame-interpolation" -description = "Gradio demo for FILM: Frame Interpolation for Large Scene Motion. To use it, simply upload your images and add the times to interpolate number or click on one of the examples to load them. Read more at the links below." -article = "

FILM: Frame Interpolation for Large Motion | Github Repo
" -examples = [ - ['cat3.jpeg', 'cat4.jpeg', 2, model_names[0]], - ['cat1.jpeg', 'cat2.jpeg', 2, model_names[1]], -] - -gr.Interface( - predict, - [ - gr.inputs.Image(type='filepath'), - gr.inputs.Image(type='filepath'), - gr.inputs.Slider(minimum=2, maximum=4, step=1), - gr.inputs.Dropdown(choices=model_names, default=model_names[0]) - ], - "playable_video", - title=title, - description=description, - article=article, - examples=examples -).launch(enable_queue=True) diff --git a/spaces/akhaliq/lama/bin/predict_inner_features.py b/spaces/akhaliq/lama/bin/predict_inner_features.py deleted file mode 100644 index 4f9f7a11a6c4757a4eaa05cf1ac648d372f7e02f..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/bin/predict_inner_features.py +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env python3 - -# Example command: -# ./bin/predict.py \ -# model.path= \ -# indir= \ -# outdir= - -import logging -import os -import sys -import traceback - -from saicinpainting.evaluation.utils import move_to_device - -os.environ['OMP_NUM_THREADS'] = '1' -os.environ['OPENBLAS_NUM_THREADS'] = '1' -os.environ['MKL_NUM_THREADS'] = '1' -os.environ['VECLIB_MAXIMUM_THREADS'] = '1' -os.environ['NUMEXPR_NUM_THREADS'] = '1' - -import cv2 -import hydra -import numpy as np -import torch -import tqdm -import yaml -from omegaconf import OmegaConf -from torch.utils.data._utils.collate import default_collate - -from saicinpainting.training.data.datasets import make_default_val_dataset -from saicinpainting.training.trainers import load_checkpoint, DefaultInpaintingTrainingModule -from saicinpainting.utils import register_debug_signal_handlers, get_shape - -LOGGER = logging.getLogger(__name__) - - -@hydra.main(config_path='../configs/prediction', config_name='default_inner_features.yaml') -def main(predict_config: OmegaConf): - try: - register_debug_signal_handlers() # kill -10 will result in traceback dumped into log - - device = torch.device(predict_config.device) - - train_config_path = os.path.join(predict_config.model.path, 'config.yaml') - with open(train_config_path, 'r') as f: - train_config = OmegaConf.create(yaml.safe_load(f)) - - checkpoint_path = os.path.join(predict_config.model.path, 'models', predict_config.model.checkpoint) - model = load_checkpoint(train_config, checkpoint_path, strict=False) - model.freeze() - model.to(device) - - assert isinstance(model, DefaultInpaintingTrainingModule), 'Only DefaultInpaintingTrainingModule is supported' - assert isinstance(getattr(model.generator, 'model', None), torch.nn.Sequential) - - if not predict_config.indir.endswith('/'): - predict_config.indir += '/' - - dataset = make_default_val_dataset(predict_config.indir, **predict_config.dataset) - - max_level = max(predict_config.levels) - - with torch.no_grad(): - for img_i in tqdm.trange(len(dataset)): - mask_fname = dataset.mask_filenames[img_i] - cur_out_fname = os.path.join(predict_config.outdir, os.path.splitext(mask_fname[len(predict_config.indir):])[0]) - os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) - - batch = move_to_device(default_collate([dataset[img_i]]), device) - - img = batch['image'] - mask = batch['mask'] - mask[:] = 0 - mask_h, mask_w = mask.shape[-2:] - mask[:, :, - mask_h // 2 - predict_config.hole_radius : mask_h // 2 + predict_config.hole_radius, - mask_w // 2 - predict_config.hole_radius : mask_w // 2 + predict_config.hole_radius] = 1 - - masked_img = torch.cat([img * (1 - mask), mask], dim=1) - - feats = masked_img - for level_i, level in enumerate(model.generator.model): - 
feats = level(feats) - if level_i in predict_config.levels: - cur_feats = torch.cat([f for f in feats if torch.is_tensor(f)], dim=1) \ - if isinstance(feats, tuple) else feats - - if predict_config.slice_channels: - cur_feats = cur_feats[:, slice(*predict_config.slice_channels)] - - cur_feat = cur_feats.pow(2).mean(1).pow(0.5).clone() - cur_feat -= cur_feat.min() - cur_feat /= cur_feat.std() - cur_feat = cur_feat.clamp(0, 1) / 1 - cur_feat = cur_feat.cpu().numpy()[0] - cur_feat *= 255 - cur_feat = np.clip(cur_feat, 0, 255).astype('uint8') - cv2.imwrite(cur_out_fname + f'_lev{level_i:02d}_norm.png', cur_feat) - - # for channel_i in predict_config.channels: - # - # cur_feat = cur_feats[0, channel_i].clone().detach().cpu().numpy() - # cur_feat -= cur_feat.min() - # cur_feat /= cur_feat.max() - # cur_feat *= 255 - # cur_feat = np.clip(cur_feat, 0, 255).astype('uint8') - # cv2.imwrite(cur_out_fname + f'_lev{level_i}_ch{channel_i}.png', cur_feat) - elif level_i >= max_level: - break - except KeyboardInterrupt: - LOGGER.warning('Interrupted by user') - except Exception as ex: - LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}') - sys.exit(1) - - -if __name__ == '__main__': - main() diff --git a/spaces/akhaliq/lama/models/ade20k/segm_lib/utils/data/dataloader.py b/spaces/akhaliq/lama/models/ade20k/segm_lib/utils/data/dataloader.py deleted file mode 100644 index 039b9ec3645b2a4626ff47c221e372f32a6ad339..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/models/ade20k/segm_lib/utils/data/dataloader.py +++ /dev/null @@ -1,425 +0,0 @@ -import torch -import torch.multiprocessing as multiprocessing -from torch._C import _set_worker_signal_handlers, \ - _remove_worker_pids, _error_if_any_worker_fails -try: - from torch._C import _set_worker_pids -except: - from torch._C import _update_worker_pids as _set_worker_pids -from .sampler import SequentialSampler, RandomSampler, BatchSampler -import signal -import collections -import re -import sys -import threading -import traceback -from torch._six import string_classes, int_classes -import numpy as np - -if sys.version_info[0] == 2: - import Queue as queue -else: - import queue - - -class ExceptionWrapper(object): - r"Wraps an exception plus traceback to communicate across threads" - - def __init__(self, exc_info): - self.exc_type = exc_info[0] - self.exc_msg = "".join(traceback.format_exception(*exc_info)) - - -_use_shared_memory = False -"""Whether to use shared memory in default_collate""" - - -def _worker_loop(dataset, index_queue, data_queue, collate_fn, seed, init_fn, worker_id): - global _use_shared_memory - _use_shared_memory = True - - # Intialize C side signal handlers for SIGBUS and SIGSEGV. Python signal - # module's handlers are executed after Python returns from C low-level - # handlers, likely when the same fatal signal happened again already. - # https://docs.python.org/3/library/signal.html Sec. 
18.8.1.1 - _set_worker_signal_handlers() - - torch.set_num_threads(1) - torch.manual_seed(seed) - np.random.seed(seed) - - if init_fn is not None: - init_fn(worker_id) - - while True: - r = index_queue.get() - if r is None: - break - idx, batch_indices = r - try: - samples = collate_fn([dataset[i] for i in batch_indices]) - except Exception: - data_queue.put((idx, ExceptionWrapper(sys.exc_info()))) - else: - data_queue.put((idx, samples)) - - -def _worker_manager_loop(in_queue, out_queue, done_event, pin_memory, device_id): - if pin_memory: - torch.cuda.set_device(device_id) - - while True: - try: - r = in_queue.get() - except Exception: - if done_event.is_set(): - return - raise - if r is None: - break - if isinstance(r[1], ExceptionWrapper): - out_queue.put(r) - continue - idx, batch = r - try: - if pin_memory: - batch = pin_memory_batch(batch) - except Exception: - out_queue.put((idx, ExceptionWrapper(sys.exc_info()))) - else: - out_queue.put((idx, batch)) - -numpy_type_map = { - 'float64': torch.DoubleTensor, - 'float32': torch.FloatTensor, - 'float16': torch.HalfTensor, - 'int64': torch.LongTensor, - 'int32': torch.IntTensor, - 'int16': torch.ShortTensor, - 'int8': torch.CharTensor, - 'uint8': torch.ByteTensor, -} - - -def default_collate(batch): - "Puts each data field into a tensor with outer dimension batch size" - - error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" - elem_type = type(batch[0]) - if torch.is_tensor(batch[0]): - out = None - if _use_shared_memory: - # If we're in a background process, concatenate directly into a - # shared memory tensor to avoid an extra copy - numel = sum([x.numel() for x in batch]) - storage = batch[0].storage()._new_shared(numel) - out = batch[0].new(storage) - return torch.stack(batch, 0, out=out) - elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ - and elem_type.__name__ != 'string_': - elem = batch[0] - if elem_type.__name__ == 'ndarray': - # array of string classes and object - if re.search('[SaUO]', elem.dtype.str) is not None: - raise TypeError(error_msg.format(elem.dtype)) - - return torch.stack([torch.from_numpy(b) for b in batch], 0) - if elem.shape == (): # scalars - py_type = float if elem.dtype.name.startswith('float') else int - return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) - elif isinstance(batch[0], int_classes): - return torch.LongTensor(batch) - elif isinstance(batch[0], float): - return torch.DoubleTensor(batch) - elif isinstance(batch[0], string_classes): - return batch - elif isinstance(batch[0], collections.Mapping): - return {key: default_collate([d[key] for d in batch]) for key in batch[0]} - elif isinstance(batch[0], collections.Sequence): - transposed = zip(*batch) - return [default_collate(samples) for samples in transposed] - - raise TypeError((error_msg.format(type(batch[0])))) - - -def pin_memory_batch(batch): - if torch.is_tensor(batch): - return batch.pin_memory() - elif isinstance(batch, string_classes): - return batch - elif isinstance(batch, collections.Mapping): - return {k: pin_memory_batch(sample) for k, sample in batch.items()} - elif isinstance(batch, collections.Sequence): - return [pin_memory_batch(sample) for sample in batch] - else: - return batch - - -_SIGCHLD_handler_set = False -"""Whether SIGCHLD handler is set for DataLoader worker failures. 
Only one -handler needs to be set for all DataLoaders in a process.""" - - -def _set_SIGCHLD_handler(): - # Windows doesn't support SIGCHLD handler - if sys.platform == 'win32': - return - # can't set signal in child threads - if not isinstance(threading.current_thread(), threading._MainThread): - return - global _SIGCHLD_handler_set - if _SIGCHLD_handler_set: - return - previous_handler = signal.getsignal(signal.SIGCHLD) - if not callable(previous_handler): - previous_handler = None - - def handler(signum, frame): - # This following call uses `waitid` with WNOHANG from C side. Therefore, - # Python can still get and update the process status successfully. - _error_if_any_worker_fails() - if previous_handler is not None: - previous_handler(signum, frame) - - signal.signal(signal.SIGCHLD, handler) - _SIGCHLD_handler_set = True - - -class DataLoaderIter(object): - "Iterates once over the DataLoader's dataset, as specified by the sampler" - - def __init__(self, loader): - self.dataset = loader.dataset - self.collate_fn = loader.collate_fn - self.batch_sampler = loader.batch_sampler - self.num_workers = loader.num_workers - self.pin_memory = loader.pin_memory and torch.cuda.is_available() - self.timeout = loader.timeout - self.done_event = threading.Event() - - self.sample_iter = iter(self.batch_sampler) - - if self.num_workers > 0: - self.worker_init_fn = loader.worker_init_fn - self.index_queue = multiprocessing.SimpleQueue() - self.worker_result_queue = multiprocessing.SimpleQueue() - self.batches_outstanding = 0 - self.worker_pids_set = False - self.shutdown = False - self.send_idx = 0 - self.rcvd_idx = 0 - self.reorder_dict = {} - - base_seed = torch.LongTensor(1).random_(0, 2**31-1)[0] - self.workers = [ - multiprocessing.Process( - target=_worker_loop, - args=(self.dataset, self.index_queue, self.worker_result_queue, self.collate_fn, - base_seed + i, self.worker_init_fn, i)) - for i in range(self.num_workers)] - - if self.pin_memory or self.timeout > 0: - self.data_queue = queue.Queue() - if self.pin_memory: - maybe_device_id = torch.cuda.current_device() - else: - # do not initialize cuda context if not necessary - maybe_device_id = None - self.worker_manager_thread = threading.Thread( - target=_worker_manager_loop, - args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, - maybe_device_id)) - self.worker_manager_thread.daemon = True - self.worker_manager_thread.start() - else: - self.data_queue = self.worker_result_queue - - for w in self.workers: - w.daemon = True # ensure that the worker exits on process exit - w.start() - - _set_worker_pids(id(self), tuple(w.pid for w in self.workers)) - _set_SIGCHLD_handler() - self.worker_pids_set = True - - # prime the prefetch loop - for _ in range(2 * self.num_workers): - self._put_indices() - - def __len__(self): - return len(self.batch_sampler) - - def _get_batch(self): - if self.timeout > 0: - try: - return self.data_queue.get(timeout=self.timeout) - except queue.Empty: - raise RuntimeError('DataLoader timed out after {} seconds'.format(self.timeout)) - else: - return self.data_queue.get() - - def __next__(self): - if self.num_workers == 0: # same-process loading - indices = next(self.sample_iter) # may raise StopIteration - batch = self.collate_fn([self.dataset[i] for i in indices]) - if self.pin_memory: - batch = pin_memory_batch(batch) - return batch - - # check if the next sample has already been generated - if self.rcvd_idx in self.reorder_dict: - batch = self.reorder_dict.pop(self.rcvd_idx) - return 
self._process_next_batch(batch) - - if self.batches_outstanding == 0: - self._shutdown_workers() - raise StopIteration - - while True: - assert (not self.shutdown and self.batches_outstanding > 0) - idx, batch = self._get_batch() - self.batches_outstanding -= 1 - if idx != self.rcvd_idx: - # store out-of-order samples - self.reorder_dict[idx] = batch - continue - return self._process_next_batch(batch) - - next = __next__ # Python 2 compatibility - - def __iter__(self): - return self - - def _put_indices(self): - assert self.batches_outstanding < 2 * self.num_workers - indices = next(self.sample_iter, None) - if indices is None: - return - self.index_queue.put((self.send_idx, indices)) - self.batches_outstanding += 1 - self.send_idx += 1 - - def _process_next_batch(self, batch): - self.rcvd_idx += 1 - self._put_indices() - if isinstance(batch, ExceptionWrapper): - raise batch.exc_type(batch.exc_msg) - return batch - - def __getstate__(self): - # TODO: add limited pickling support for sharing an iterator - # across multiple threads for HOGWILD. - # Probably the best way to do this is by moving the sample pushing - # to a separate thread and then just sharing the data queue - # but signalling the end is tricky without a non-blocking API - raise NotImplementedError("DataLoaderIterator cannot be pickled") - - def _shutdown_workers(self): - try: - if not self.shutdown: - self.shutdown = True - self.done_event.set() - # if worker_manager_thread is waiting to put - while not self.data_queue.empty(): - self.data_queue.get() - for _ in self.workers: - self.index_queue.put(None) - # done_event should be sufficient to exit worker_manager_thread, - # but be safe here and put another None - self.worker_result_queue.put(None) - finally: - # removes pids no matter what - if self.worker_pids_set: - _remove_worker_pids(id(self)) - self.worker_pids_set = False - - def __del__(self): - if self.num_workers > 0: - self._shutdown_workers() - - -class DataLoader(object): - """ - Data loader. Combines a dataset and a sampler, and provides - single- or multi-process iterators over the dataset. - - Arguments: - dataset (Dataset): dataset from which to load the data. - batch_size (int, optional): how many samples per batch to load - (default: 1). - shuffle (bool, optional): set to ``True`` to have the data reshuffled - at every epoch (default: False). - sampler (Sampler, optional): defines the strategy to draw samples from - the dataset. If specified, ``shuffle`` must be False. - batch_sampler (Sampler, optional): like sampler, but returns a batch of - indices at a time. Mutually exclusive with batch_size, shuffle, - sampler, and drop_last. - num_workers (int, optional): how many subprocesses to use for data - loading. 0 means that the data will be loaded in the main process. - (default: 0) - collate_fn (callable, optional): merges a list of samples to form a mini-batch. - pin_memory (bool, optional): If ``True``, the data loader will copy tensors - into CUDA pinned memory before returning them. - drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, - if the dataset size is not divisible by the batch size. If ``False`` and - the size of dataset is not divisible by the batch size, then the last batch - will be smaller. (default: False) - timeout (numeric, optional): if positive, the timeout value for collecting a batch - from workers. Should always be non-negative. 
(default: 0) - worker_init_fn (callable, optional): If not None, this will be called on each - worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as - input, after seeding and before data loading. (default: None) - - .. note:: By default, each worker will have its PyTorch seed set to - ``base_seed + worker_id``, where ``base_seed`` is a long generated - by main process using its RNG. You may use ``torch.initial_seed()`` to access - this value in :attr:`worker_init_fn`, which can be used to set other seeds - (e.g. NumPy) before data loading. - - .. warning:: If ``spawn'' start method is used, :attr:`worker_init_fn` cannot be an - unpicklable object, e.g., a lambda function. - """ - - def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, - num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False, - timeout=0, worker_init_fn=None): - self.dataset = dataset - self.batch_size = batch_size - self.num_workers = num_workers - self.collate_fn = collate_fn - self.pin_memory = pin_memory - self.drop_last = drop_last - self.timeout = timeout - self.worker_init_fn = worker_init_fn - - if timeout < 0: - raise ValueError('timeout option should be non-negative') - - if batch_sampler is not None: - if batch_size > 1 or shuffle or sampler is not None or drop_last: - raise ValueError('batch_sampler is mutually exclusive with ' - 'batch_size, shuffle, sampler, and drop_last') - - if sampler is not None and shuffle: - raise ValueError('sampler is mutually exclusive with shuffle') - - if self.num_workers < 0: - raise ValueError('num_workers cannot be negative; ' - 'use num_workers=0 to disable multiprocessing.') - - if batch_sampler is None: - if sampler is None: - if shuffle: - sampler = RandomSampler(dataset) - else: - sampler = SequentialSampler(dataset) - batch_sampler = BatchSampler(sampler, batch_size, drop_last) - - self.sampler = sampler - self.batch_sampler = batch_sampler - - def __iter__(self): - return DataLoaderIter(self) - - def __len__(self): - return len(self.batch_sampler) diff --git a/spaces/aliabd/Anime2Sketch/app.py b/spaces/aliabd/Anime2Sketch/app.py deleted file mode 100644 index 91ba257902e995bbecf44fff4e0dbcfeea384a7d..0000000000000000000000000000000000000000 --- a/spaces/aliabd/Anime2Sketch/app.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -import random -from data import get_image_list -from model import create_model -from data import read_img_path, tensor_to_img, save_image -import gradio as gr -import torchtext -from PIL import Image -import torch - -torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a5/Tsunami_by_hokusai_19th_century.jpg/1920px-Tsunami_by_hokusai_19th_century.jpg', 'wave.jpg') -torch.hub.download_url_to_file('https://cdn.pixabay.com/photo/2020/10/02/13/49/bridge-5621201_1280.jpg', 'building.jpg') - -torchtext.utils.download_from_url("https://drive.google.com/uc?id=1RILKwUdjjBBngB17JHwhZNBEaW4Mr-Ml", root="./weights/") -gpu_ids=[] -model = create_model(gpu_ids) - -def sketch2anime(img, load_size=512): - img, aus_resize = read_img_path(img.name, load_size) - aus_tensor = model(img) - aus_img = tensor_to_img(aus_tensor) - image_pil = Image.fromarray(aus_img) - image_pil = image_pil.resize(aus_resize, Image.BICUBIC) - return image_pil - - -title = "Anime2Sketch" -description = "A sketch extractor for illustration, anime art and manga. Read more at the links below." -article = "

Adversarial Open Domain Adaption for Sketch-to-Photo Synthesis | Github Repo
" - -gr.Interface( - sketch2anime, - [gr.inputs.Image(type="file", label="Input")], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ["building.jpg"], - ["wave.jpg"] - ]).launch(debug=True) diff --git a/spaces/allknowingroger/Image-Models-Test11/app.py b/spaces/allknowingroger/Image-Models-Test11/app.py deleted file mode 100644 index 00819f049f3e8fcd38b83c6d0b42e243a93cc3d7..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test11/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "Interfan/abraham", - "Alexzyx/lora-trained-xl-colab-v3", - "WALIDALI/joejordly", - "gfalcao/ldsc26jun-nocrop", - "digiplay/ShowmakerMix_v1", - "digiplay/VersaMix_base_diffusers", - "digiplay/AbsoluteReality_v1.0_diffusers", - "digiplay/2.5DSET_new1a25d_FFver", - "digiplay/DiamondCoalMix_v2_pruned_diffusers", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict 
= {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/anpigon/talktosayno/app.py b/spaces/anpigon/talktosayno/app.py deleted file mode 100644 index 746507356e3cc1a8946b274a4a93a1a0fb2f63d2..0000000000000000000000000000000000000000 --- a/spaces/anpigon/talktosayno/app.py +++ /dev/null @@ -1,91 +0,0 @@ -import gradio as gr -import chromadb -from langchain import PromptTemplate -from langchain.chains import RetrievalQAWithSourcesChain -from langchain.chat_models import ChatOpenAI -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.prompts.chat import ( - ChatPromptTemplate, - SystemMessagePromptTemplate, -) -from langchain.vectorstores import Chroma - -from constants import persist_directory - -embedding = OpenAIEmbeddings() -client = chromadb.PersistentClient(path=persist_directory) -vector_store = Chroma(client=client, embedding_function=embedding) -retriever = vector_store.as_retriever(search_kwargs={"k": 10}) - -llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0) -# llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) - -system_template = """Your name is Sayno. You act like a successful rich man. You should answer in the tone of a long document. Never reveal that it is an AI. -Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES"). -If you don't know the answer, just say that you don't know. Don't try to make up an answer. -!IMPORTANT: ALWAYS return a "SOURCES" part in your answer. -!IMPORTANT: Be sure to answer in Korean. - -[EXAMPLE] -QUESTION: Which state/country's law governs the interpretation of the contract? -========= -Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights. -Source: 28-pl -Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries. 
-Source: 30-pl -Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur, -Source: 4-pl -========= -FINAL ANSWER: This Agreement is governed by English law. -SOURCES: 28-pl - -QUESTION: {question} -========= -{summaries} -========= -FINAL ANSWER: -""" - -prompt = ChatPromptTemplate.from_messages( - [ - SystemMessagePromptTemplate.from_template(system_template), - ] -) - -document_prompt = PromptTemplate( - template="Content: {page_content}\nSource: {source}, {page} page", - input_variables=["page_content", "source", "page"], -) - -chain_type_kwargs = {"prompt": prompt, "document_prompt": document_prompt} - -chain = RetrievalQAWithSourcesChain.from_chain_type( - llm=llm, - chain_type="stuff", - retriever=retriever, - return_source_documents=True, - chain_type_kwargs=chain_type_kwargs, - reduce_k_below_max_tokens=True, - verbose=False, -) - - -def respond(message, chat_history): - result = chain(message) - print(result) - bot_message = f'{result["answer"]}
- 출처: {result["sources"]}' - chat_history.append((message, bot_message)) - return "", chat_history - - -with gr.Blocks(theme=gr.themes.Soft()) as demo: - gr.Markdown("# 안녕하세요. 세이노와 대화해보세요.") - initial_greeting = "안녕하세요!\n저는 세이노처럼 경험과 지식을 갖춘 인공지능 ChatGPT입니다. 세이노는 사업, 경영, 투자에 대한 전문가이며, 많은 사람들이 그의 조언을 참고하고 있습니다. 어떤 도움이 필요하신가요? 세이노와 관련된 질문이 있으시면 편안하게 물어보세요!" - chatbot = gr.Chatbot(label="채팅창", value=[(None, initial_greeting)]) - msg = gr.Textbox(label="입력") - clear = gr.Button("초기화") - - msg.submit(respond, [msg, chatbot], [msg, chatbot]) - clear.click(lambda: None, None, chatbot, queue=False) - -demo.launch(debug=False) diff --git a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/DeepSpeed.md b/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/DeepSpeed.md deleted file mode 100644 index 70cd81519a6954ebc7cdaf82e03a169bed878106..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/docs/DeepSpeed.md +++ /dev/null @@ -1,23 +0,0 @@ -An alternative way of reducing the GPU memory usage of models is to use the `DeepSpeed ZeRO-3` optimization. - -With this, I have been able to load a 6b model (GPT-J 6B) with less than 6GB of VRAM. The speed of text generation is very decent and much better than what would be accomplished with `--auto-devices --gpu-memory 6`. - -As far as I know, DeepSpeed is only available for Linux at the moment. - -### How to use it - -1. Install DeepSpeed: - -``` -pip install deepspeed -``` - -2. Start the web UI replacing `python` with `deepspeed --num_gpus=1` and adding the `--deepspeed` flag. Example: - -``` -deepspeed --num_gpus=1 server.py --deepspeed --chat --model gpt-j-6B -``` - -### Learn more - -For more information, check out [this comment](https://github.com/oobabooga/text-generation-webui/issues/40#issuecomment-1412038622) by 81300, who came up with the DeepSpeed support in this web UI. \ No newline at end of file diff --git a/spaces/apsys/hetfit/nets/deep_dense.py b/spaces/apsys/hetfit/nets/deep_dense.py deleted file mode 100644 index 14ca122f73f1a191c0e3ad902b1e312985b7242a..0000000000000000000000000000000000000000 --- a/spaces/apsys/hetfit/nets/deep_dense.py +++ /dev/null @@ -1,32 +0,0 @@ -from torch import nn -from torch.functional import F - -class dmodel(nn.Module): - """4 layers Torch model. Relu activations, hidden layers are same size. - - """ - def __init__(self, in_features=1, hidden_features=200, out_features=1): - """Init - - Args: - in_features (int, optional): Input features. Defaults to 1. - hidden_features (int, optional): Hidden dims. Defaults to 200. - out_features (int, optional): Output dims. Defaults to 1. - """ - super(dmodel, self).__init__() - - self.fc1 = nn.Linear(in_features, hidden_features) - self.fc2 = nn.Linear(hidden_features, hidden_features) - self.fc3 = nn.Linear(hidden_features, hidden_features) - self.fc4 = nn.Linear(hidden_features, out_features) - - - def forward(self, x): - x = self.fc1(x) - x = F.relu(x) # ReLU activation - x = self.fc2(x) - x = F.relu(x) # ReLU activation - x = self.fc3(x) - x = F.relu(x) # ReLU activation - x = self.fc4(x) - return x \ No newline at end of file diff --git a/spaces/arikru/packstation-inspector/app.py b/spaces/arikru/packstation-inspector/app.py deleted file mode 100644 index 0eb11b29ce5da7a1a1e8934bda724c06bcf5bc02..0000000000000000000000000000000000000000 --- a/spaces/arikru/packstation-inspector/app.py +++ /dev/null @@ -1,30 +0,0 @@ -# AUTOGENERATED! 
DO NOT EDIT! File to edit: app.ipynb. - -# %% auto 0 -__all__ = ['learn', 'categories', 'im', 'image', 'label', 'examples', 'intf', 'classify_image'] - -# %% app.ipynb 1 -from fastai.vision.all import * -import gradio as gr - -# %% app.ipynb 2 -learn = load_learner('export.pkl') - -# %% app.ipynb 7 -categories = ('dreckig', 'sauber') -im = PILImage.create('packstation_sauber.jpg') -im.thumbnail((192,192)) -im - -learn.predict(im) -def classify_image(img): - prod,idx,probs = learn.predict(img) - return dict(zip(categories, map(float,probs))) - -# %% app.ipynb 9 -image = gr.inputs.Image(shape=(192,192)) -label = gr.outputs.Label() -examples = ['packstation_sauber.jpg', 'packstation_dreckig.jpg'] - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False) diff --git a/spaces/artificialguybr/freedom/README.md b/spaces/artificialguybr/freedom/README.md deleted file mode 100644 index 873e7e62d36ccd8fbb8c52187dfea4bf94ee2b6f..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/freedom/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Freedom -emoji: 🌖 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/bin/resample.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/bin/resample.py deleted file mode 100644 index a3f28485d1fb235ab0d521ee30318c64b48fbd5a..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/bin/resample.py +++ /dev/null @@ -1,90 +0,0 @@ -import argparse -import glob -import os -from argparse import RawTextHelpFormatter -from multiprocessing import Pool -from shutil import copytree - -import librosa -import soundfile as sf -from tqdm import tqdm - - -def resample_file(func_args): - filename, output_sr = func_args - y, sr = librosa.load(filename, sr=output_sr) - sf.write(filename, y, sr) - - -def resample_files(input_dir, output_sr, output_dir=None, file_ext="wav", n_jobs=10): - if output_dir: - print("Recursively copying the input folder...") - copytree(input_dir, output_dir) - input_dir = output_dir - - print("Resampling the audio files...") - audio_files = glob.glob(os.path.join(input_dir, f"**/*.{file_ext}"), recursive=True) - print(f"Found {len(audio_files)} files...") - audio_files = list(zip(audio_files, len(audio_files) * [output_sr])) - with Pool(processes=n_jobs) as p: - with tqdm(total=len(audio_files)) as pbar: - for _, _ in enumerate(p.imap_unordered(resample_file, audio_files)): - pbar.update() - - print("Done !") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="""Resample a folder recusively with librosa - Can be used in place or create a copy of the folder as an output.\n\n - Example run: - python TTS/bin/resample.py - --input_dir /root/LJSpeech-1.1/ - --output_sr 22050 - --output_dir /root/resampled_LJSpeech-1.1/ - --file_ext wav - --n_jobs 24 - """, - formatter_class=RawTextHelpFormatter, - ) - - parser.add_argument( - "--input_dir", - type=str, - default=None, - required=True, - help="Path of the folder containing the audio files to resample", - ) - - parser.add_argument( - "--output_sr", - type=int, - default=22050, - required=False, - help="Samlple rate to which the audio files should be resampled", - ) - - parser.add_argument( - "--output_dir", - type=str, - default=None, - required=False, - help="Path of 
the destination folder. If not defined, the operation is done in place", - ) - - parser.add_argument( - "--file_ext", - type=str, - default="wav", - required=False, - help="Extension of the audio files to resample", - ) - - parser.add_argument( - "--n_jobs", type=int, default=None, help="Number of threads to use, by default it uses all cores" - ) - - args = parser.parse_args() - - resample_files(args.input_dir, args.output_sr, args.output_dir, args.file_ext, args.n_jobs) diff --git a/spaces/artificialguybr/video-dubbing/TTS/hubconf.py b/spaces/artificialguybr/video-dubbing/TTS/hubconf.py deleted file mode 100644 index 0c9c5930fcbf98962d3086e7537aa3941b191083..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/hubconf.py +++ /dev/null @@ -1,46 +0,0 @@ -dependencies = [ - 'torch', 'gdown', 'pysbd', 'gruut', 'anyascii', 'pypinyin', 'coqpit', 'mecab-python3', 'unidic-lite' -] -import torch - -from TTS.utils.manage import ModelManager -from TTS.utils.synthesizer import Synthesizer - - -def tts(model_name='tts_models/en/ljspeech/tacotron2-DCA', - vocoder_name=None, - use_cuda=False): - """TTS entry point for PyTorch Hub that provides a Synthesizer object to synthesize speech from a give text. - - Example: - >>> synthesizer = torch.hub.load('coqui-ai/TTS', 'tts', source='github') - >>> wavs = synthesizer.tts("This is a test! This is also a test!!") - wavs - is a list of values of the synthesized speech. - - Args: - model_name (str, optional): One of the model names from .model.json. Defaults to 'tts_models/en/ljspeech/tacotron2-DCA'. - vocoder_name (str, optional): One of the model names from .model.json. Defaults to 'vocoder_models/en/ljspeech/multiband-melgan'. - pretrained (bool, optional): [description]. Defaults to True. - - Returns: - TTS.utils.synthesizer.Synthesizer: Synthesizer object wrapping both vocoder and tts models. - """ - manager = ModelManager() - - model_path, config_path, model_item = manager.download_model(model_name) - vocoder_name = model_item[ - 'default_vocoder'] if vocoder_name is None else vocoder_name - vocoder_path, vocoder_config_path, _ = manager.download_model(vocoder_name) - - # create synthesizer - synt = Synthesizer(tts_checkpoint=model_path, - tts_config_path=config_path, - vocoder_checkpoint=vocoder_path, - vocoder_config=vocoder_config_path, - use_cuda=use_cuda) - return synt - - -if __name__ == '__main__': - synthesizer = torch.hub.load('coqui-ai/TTS:dev', 'tts', source='github') - synthesizer.tts("This is a test!") diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v3/tests/test_renderers.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v3/tests/test_renderers.py deleted file mode 100644 index ba8b4c9855a36774ca88a1027b12c9910213b67d..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vegalite/v3/tests/test_renderers.py +++ /dev/null @@ -1,86 +0,0 @@ -"""Tests of various renderers""" - -import json -import re - -import pytest - -import altair.vegalite.v3 as alt - - -def _extract_embedOpt(html): - """Extract an embedOpt definition from an html string. - - Note: this is very brittle, but works for the specific test in this file. 
- """ - result = re.search(r"embedOpt\s+=\s+(?P\{.*?\})", html) - if not result: - return None - else: - return json.loads(result.groupdict()["embedOpt"]) - - -@pytest.fixture -def chart(): - return alt.Chart("data.csv").mark_point() - - -def test_colab_renderer_embed_options(chart): - """Test that embed_options in renderer metadata are correctly manifest in html""" - - def assert_actions_true(chart): - bundle = chart._repr_mimebundle_(None, None) - embedOpt = _extract_embedOpt(bundle["text/html"]) - assert embedOpt == {"actions": True, "mode": "vega-lite"} - - def assert_actions_false(chart): - bundle = chart._repr_mimebundle_(None, None) - embedOpt = _extract_embedOpt(bundle["text/html"]) - assert embedOpt == {"actions": False, "mode": "vega-lite"} - - with alt.renderers.enable("colab", embed_options=dict(actions=False)): - assert_actions_false(chart) - - with alt.renderers.enable("colab"): - with alt.renderers.enable(embed_options=dict(actions=True)): - assert_actions_true(chart) - - with alt.renderers.set_embed_options(actions=False): - assert_actions_false(chart) - - with alt.renderers.set_embed_options(actions=True): - assert_actions_true(chart) - - -def test_default_renderer_embed_options(chart, renderer="default"): - # check that metadata is passed appropriately - mimetype = alt.display.VEGALITE_MIME_TYPE - spec = chart.to_dict() - with alt.renderers.enable(renderer, embed_options=dict(actions=False)): - bundle, metadata = chart._repr_mimebundle_(None, None) - assert set(bundle.keys()) == {mimetype, "text/plain"} - assert bundle[mimetype] == spec - assert metadata == {mimetype: {"embed_options": {"actions": False}}} - - # Sanity check: no metadata specified - with alt.renderers.enable(renderer): - bundle, metadata = chart._repr_mimebundle_(None, None) - assert bundle[mimetype] == spec - assert metadata == {} - - -def test_json_renderer_embed_options(chart, renderer="json"): - """Test that embed_options in renderer metadata are correctly manifest in html""" - mimetype = "application/json" - spec = chart.to_dict() - with alt.renderers.enable("json", option="foo"): - bundle, metadata = chart._repr_mimebundle_(None, None) - assert set(bundle.keys()) == {mimetype, "text/plain"} - assert bundle[mimetype] == spec - assert metadata == {mimetype: {"option": "foo"}} - - # Sanity check: no options specified - with alt.renderers.enable(renderer): - bundle, metadata = chart._repr_mimebundle_(None, None) - assert bundle[mimetype] == spec - assert metadata == {} diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/dummy_model.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/dummy_model.py deleted file mode 100644 index ff26e4fe655d8e8d7f9942c4bd3df7cd267405fb..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/benchmark/dummy_model.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch.nn as nn -import torch.nn.functional as F -from fairseq.data import Dictionary -from fairseq.models import ( - FairseqDecoder, - FairseqLanguageModel, - register_model, - register_model_architecture, -) - - -@register_model("dummy_model") -class DummyModel(FairseqLanguageModel): - def __init__(self, args, encoder): - super().__init__(encoder) - self.args = args - - @staticmethod - def add_args(parser): - parser.add_argument("--num-layers", type=int, default=24) - parser.add_argument("--embed-dim", type=int, default=1024) - - @classmethod - def build_model(cls, args, task): - encoder = DummyEncoder( - num_embed=len(task.target_dictionary), - embed_dim=args.embed_dim, - num_layers=args.num_layers, - ) - return cls(args, encoder) - - def forward(self, src_tokens, masked_tokens=None, **kwargs): - return self.decoder(src_tokens, masked_tokens=masked_tokens) - - -class DummyEncoder(FairseqDecoder): - def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24): - super().__init__(Dictionary()) - self.embed = nn.Embedding( - num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0 - ) - self.layers_a = nn.ModuleList( - [ - nn.Sequential( - nn.LayerNorm(embed_dim), - nn.Linear(embed_dim, 3 * embed_dim), # q, k, v input projection - nn.Linear(3 * embed_dim, embed_dim), # skip self-attention - nn.Linear(embed_dim, embed_dim), # output projection - nn.Dropout(), - ) - for i in range(num_layers) - ] - ) - self.layers_b = nn.ModuleList( - [ - nn.Sequential( - nn.LayerNorm(embed_dim), - nn.Linear(embed_dim, 4 * embed_dim), # FFN - nn.ReLU(), - nn.Linear(4 * embed_dim, embed_dim), # FFN - nn.Dropout(0.1), - ) - for i in range(num_layers) - ] - ) - self.out_proj = nn.Linear(embed_dim, num_embed) - - def forward(self, tokens, masked_tokens=None): - x = self.embed(tokens) - for layer_a, layer_b in zip(self.layers_a, self.layers_b): - x = x + layer_a(x) - x = x + layer_b(x) - x = self.out_proj(x) - if masked_tokens is not None: - x = x[masked_tokens] - return (x,) - - def max_positions(self): - return 1024 - - def get_normalized_probs(self, net_output, log_probs, sample=None): - logits = net_output[0].float() - if log_probs: - return F.log_softmax(logits, dim=-1) - else: - return F.softmax(logits, dim=-1) - - -@register_model_architecture("dummy_model", "dummy_model") -def base_architecture(args): - pass diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/config/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/config/__init__.py deleted file mode 100644 index 6264236915a7269a4d920ee8213004374dd86a9a..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/config/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
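Since `dummy_model.py` above is only a benchmarking stub, the quickest way to see what it computes is to drive `DummyEncoder` directly; a minimal sketch, assuming the installed fairseq package ships `fairseq/benchmark/dummy_model.py` and torch is available (the sizes are arbitrary):

```python
# Minimal sketch: run DummyEncoder standalone (assumed import path; arbitrary sizes).
import torch
from fairseq.benchmark.dummy_model import DummyEncoder

encoder = DummyEncoder(num_embed=1000, embed_dim=64, num_layers=2)
tokens = torch.randint(1, 1000, (2, 16))   # [batch, length]; id 0 is reserved for padding
(logits,) = encoder(tokens)                # forward returns a 1-tuple
print(logits.shape)                        # torch.Size([2, 16, 1000]) -> per-token vocab logits
```

Each of the `num_layers` blocks adds a residual attention-shaped projection stack plus a feed-forward stack, so parameter count grows roughly linearly with `--num-layers` and quadratically with `--embed-dim`, which is what makes it a convenient benchmarking dummy.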
diff --git a/spaces/asciicorp/hotel-chat/vector_qa.py b/spaces/asciicorp/hotel-chat/vector_qa.py deleted file mode 100644 index 8bcc27afd7e7d871d8d2bd305713898aeb5c9374..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/hotel-chat/vector_qa.py +++ /dev/null @@ -1,30 +0,0 @@ -from langchain.llms import OpenAI -from langchain.chains import RetrievalQA -from langchain.prompts import PromptTemplate - -import pickle -import os - -os.environ["OPENAI_API_KEY"] = "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" - -llm = OpenAI(temperature=0) - -prompt_template = """Use the following pieces of context to answer the question at the end. give a friendly and conversational answer a customer service agent might give. -{context} -Question: {question} -Answer:""" -PROMPT = PromptTemplate( - template=prompt_template, input_variables=["context", "question"] -) - -chain_type_kwargs = {"prompt": PROMPT} - -with open("vectorstore.pkl", "rb") as f: - vectorstore = pickle.load(f) - -hotel_details_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vectorstore.as_retriever(), chain_type_kwargs=chain_type_kwargs) - -with open("vectorstore_rooms.pkl", "rb") as f: - vectorstore_rooms = pickle.load(f) - -room_details_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vectorstore_rooms.as_retriever(), chain_type_kwargs=chain_type_kwargs) \ No newline at end of file diff --git a/spaces/aseifert/ExplaiNER/src/utils.py b/spaces/aseifert/ExplaiNER/src/utils.py deleted file mode 100644 index 7443acbb3ca7e879e690f5fed981009b94cef6fc..0000000000000000000000000000000000000000 --- a/spaces/aseifert/ExplaiNER/src/utils.py +++ /dev/null @@ -1,255 +0,0 @@ -from pathlib import Path - -import matplotlib as matplotlib -import matplotlib.cm as cm -import pandas as pd -import streamlit as st -import tokenizers -import torch -import torch.nn.functional as F -from st_aggrid import AgGrid, GridOptionsBuilder, GridUpdateMode - -PROJ = Path(__file__).parent - -tokenizer_hash_funcs = { - tokenizers.Tokenizer: lambda _: None, - tokenizers.AddedToken: lambda _: None, -} -# device = torch.device("cuda" if torch.cuda.is_available() else "cpu" if torch.has_mps else "cpu") -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -classmap = { - "O": "O", - "PER": "🙎", - "person": "🙎", - "LOC": "🌎", - "location": "🌎", - "ORG": "🏤", - "corporation": "🏤", - "product": "📱", - "creative": "🎷", - "MISC": "🎷", -} - - -def aggrid_interactive_table(df: pd.DataFrame) -> dict: - """Creates an st-aggrid interactive table based on a dataframe. 
- - Args: - df (pd.DataFrame]): Source dataframe - Returns: - dict: The selected row - """ - options = GridOptionsBuilder.from_dataframe( - df, enableRowGroup=True, enableValue=True, enablePivot=True - ) - - options.configure_side_bar() - # options.configure_default_column(cellRenderer=JsCode('''function(params) {return ''+params.value+''}''')) - - options.configure_selection("single") - selection = AgGrid( - df, - enable_enterprise_modules=True, - gridOptions=options.build(), - theme="light", - update_mode=GridUpdateMode.NO_UPDATE, - allow_unsafe_jscode=True, - ) - - return selection - - -def explode_df(df: pd.DataFrame) -> pd.DataFrame: - """Takes a dataframe and explodes all the fields.""" - - df_tokens = df.apply(pd.Series.explode) - if "losses" in df.columns: - df_tokens["losses"] = df_tokens["losses"].astype(float) - return df_tokens # type: ignore - - -def align_sample(row: pd.Series): - """Uses word_ids to align all lists in a sample.""" - - columns = row.axes[0].to_list() - indices = [i for i, id in enumerate(row.word_ids) if id >= 0 and id != row.word_ids[i - 1]] - - out = {} - - tokens = [] - for i, tok in enumerate(row.tokens): - if row.word_ids[i] == -1: - continue - - if row.word_ids[i] != row.word_ids[i - 1]: - tokens.append(tok.lstrip("▁").lstrip("##").rstrip("@@")) - else: - tokens[-1] += tok.lstrip("▁").lstrip("##").rstrip("@@") - out["tokens"] = tokens - - if "preds" in columns: - out["preds"] = [row.preds[i] for i in indices] - - if "labels" in columns: - out["labels"] = [row.labels[i] for i in indices] - - if "losses" in columns: - out["losses"] = [row.losses[i] for i in indices] - - if "probs" in columns: - out["probs"] = [row.probs[i] for i in indices] - - if "hidden_states" in columns: - out["hidden_states"] = [row.hidden_states[i] for i in indices] - - if "ids" in columns: - out["ids"] = row.ids - - assert len(tokens) == len(out["preds"]), (tokens, row.tokens) - - return out - - -@st.cache( - allow_output_mutation=True, - hash_funcs=tokenizer_hash_funcs, -) -def tag_text(text: str, tokenizer, model, device: torch.device) -> pd.DataFrame: - """Tags a given text and creates an (exploded) DataFrame with the predicted labels and probabilities. - - Args: - text (str): The text to be processed - tokenizer: Tokenizer to use - model (_type_): Model to use - device (torch.device): The device we want pytorch to use for its calcultaions. - - Returns: - pd.DataFrame: A data frame holding the tagged text. 
- """ - - tokens = tokenizer(text).tokens() - tokenized = tokenizer(text, return_tensors="pt") - word_ids = [w if w is not None else -1 for w in tokenized.word_ids()] - input_ids = tokenized.input_ids.to(device) - outputs = model(input_ids, output_hidden_states=True) - preds = torch.argmax(outputs.logits, dim=2) - preds = [model.config.id2label[p] for p in preds[0].cpu().numpy()] - hidden_states = outputs.hidden_states[-1][0].detach().cpu().numpy() - # hidden_states = np.mean([hidden_states, outputs.hidden_states[0][0].detach().cpu().numpy()], axis=0) - - probs = 1 // ( - torch.min(F.softmax(outputs.logits, dim=-1), dim=-1).values[0].detach().cpu().numpy() - ) - - df = pd.DataFrame( - [[tokens, word_ids, preds, probs, hidden_states]], - columns="tokens word_ids preds probs hidden_states".split(), - ) - merged_df = pd.DataFrame(df.apply(align_sample, axis=1).tolist()) - return explode_df(merged_df).reset_index().drop(columns=["index"]) - - -def get_bg_color(label: str): - """Retrieves a label's color from the session state.""" - return st.session_state[f"color_{label}"] - - -def get_fg_color(bg_color_hex: str) -> str: - """Chooses the proper (foreground) text color (black/white) for a given background color, maximizing contrast. - - Adapted from https://gomakethings.com/dynamically-changing-the-text-color-based-on-background-color-contrast-with-vanilla-js/ - - Args: - bg_color_hex (str): The background color given as a HEX stirng. - - Returns: - str: Either "black" or "white". - """ - r = int(bg_color_hex[1:3], 16) - g = int(bg_color_hex[3:5], 16) - b = int(bg_color_hex[5:7], 16) - yiq = ((r * 299) + (g * 587) + (b * 114)) / 1000 - return "black" if (yiq >= 128) else "white" - - -def colorize_classes(df: pd.DataFrame) -> pd.DataFrame: - """Colorizes the errors in the dataframe.""" - - def colorize_row(row): - return [ - "background-color: " - + ("white" if (row["labels"] == "IGN" or (row["preds"] == row["labels"])) else "pink") - + ";" - ] * len(row) - - def colorize_col(col): - if col.name == "labels" or col.name == "preds": - bgs = [] - fgs = [] - for v in col.values: - bgs.append(get_bg_color(v.split("-")[1]) if "-" in v else "#ffffff") - fgs.append(get_fg_color(bgs[-1])) - return [f"background-color: {bg}; color: {fg};" for bg, fg in zip(bgs, fgs)] - return [""] * len(col) - - df = df.reset_index().drop(columns=["index"]).T - return df # .style.apply(colorize_col, axis=0) - - -def htmlify_labeled_example(example: pd.DataFrame) -> str: - """Builds an HTML (string) representation of a single example. - - Args: - example (pd.DataFrame): The example to process. - - Returns: - str: An HTML string representation of a single example. 
- """ - html = [] - - for _, row in example.iterrows(): - pred = row.preds.split("-")[1] if "-" in row.preds else "O" - label = row.labels - label_class = row.labels.split("-")[1] if "-" in row.labels else "O" - - color = get_bg_color(row.preds.split("-")[1]) if "-" in row.preds else "#000000" - true_color = get_bg_color(row.labels.split("-")[1]) if "-" in row.labels else "#000000" - - font_color = get_fg_color(color) if color else "white" - true_font_color = get_fg_color(true_color) if true_color else "white" - - is_correct = row.preds == row.labels - loss_html = ( - "" - if float(row.losses) < 0.01 - else f"{row.losses:.3f}" - ) - loss_html = "" - - if row.labels == row.preds == "O": - html.append(f"{row.tokens}") - elif row.labels == "IGN": - assert False - else: - opacity = "1" if not is_correct else "0.5" - correct = ( - "" - if is_correct - else f"{classmap[label_class]}" - ) - pred_icon = classmap[pred] if pred != "O" and row.preds[:2] != "I-" else "" - html.append( - f"{pred_icon + ' '}{row.tokens}{correct}{loss_html}" - ) - - return " ".join(html) - - -def color_map_color(value: float, cmap_name="Set1", vmin=0, vmax=1) -> str: - """Turns a value into a color using a color map.""" - norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax) - cmap = cm.get_cmap(cmap_name) # PiYG - rgba = cmap(norm(abs(value))) - color = matplotlib.colors.rgb2hex(rgba[:3]) - return color diff --git a/spaces/avivdm1/AutoGPT/Dockerfile b/spaces/avivdm1/AutoGPT/Dockerfile deleted file mode 100644 index 8396154998f32a50d55c199a674b638d5cf7bda2..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Use an official Python base image from the Docker Hub -FROM python:3.10-slim - -# Install git -RUN apt-get -y update -RUN apt-get -y install git chromium-driver - -# Install Xvfb and other dependencies for headless browser testing -RUN apt-get update \ - && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates - -# Install Firefox / Chromium -RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ - && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ - && apt-get update \ - && apt-get install -y chromium firefox-esr - -# Set environment variables -ENV PIP_NO_CACHE_DIR=yes \ - PYTHONUNBUFFERED=1 \ - PYTHONDONTWRITEBYTECODE=1 - -# Create a non-root user and set permissions -RUN useradd --create-home appuser -WORKDIR /home/appuser -RUN chown appuser:appuser /home/appuser -USER appuser - -# Copy the requirements.txt file and install the requirements -COPY --chown=appuser:appuser requirements.txt . 
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \ - pip install --no-cache-dir --user -r requirements.txt - -# Copy the application files -COPY --chown=appuser:appuser autogpt/ ./autogpt - -# Set the entrypoint -ENTRYPOINT ["python", "-m", "autogpt"] diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/ldmlib/data/lsun.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/ldmlib/data/lsun.py deleted file mode 100644 index 6256e45715ff0b57c53f985594d27cbbbff0e68e..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/ldmlib/data/lsun.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import numpy as np -import PIL -from PIL import Image -from torch.utils.data import Dataset -from torchvision import transforms - - -class LSUNBase(Dataset): - def __init__(self, - txt_file, - data_root, - size=None, - interpolation="bicubic", - flip_p=0.5 - ): - self.data_paths = txt_file - self.data_root = data_root - with open(self.data_paths, "r") as f: - self.image_paths = f.read().splitlines() - self._length = len(self.image_paths) - self.labels = { - "relative_file_path_": [l for l in self.image_paths], - "file_path_": [os.path.join(self.data_root, l) - for l in self.image_paths], - } - - self.size = size - self.interpolation = {"linear": PIL.Image.LINEAR, - "bilinear": PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - }[interpolation] - self.flip = transforms.RandomHorizontalFlip(p=flip_p) - - def __len__(self): - return self._length - - def __getitem__(self, i): - example = dict((k, self.labels[k][i]) for k in self.labels) - image = Image.open(example["file_path_"]) - if not image.mode == "RGB": - image = image.convert("RGB") - - # default to score-sde preprocessing - img = np.array(image).astype(np.uint8) - crop = min(img.shape[0], img.shape[1]) - h, w, = img.shape[0], img.shape[1] - img = img[(h - crop) // 2:(h + crop) // 2, - (w - crop) // 2:(w + crop) // 2] - - image = Image.fromarray(img) - if self.size is not None: - image = image.resize((self.size, self.size), resample=self.interpolation) - - image = self.flip(image) - image = np.array(image).astype(np.uint8) - example["image"] = (image / 127.5 - 1.0).astype(np.float32) - return example - - -class LSUNChurchesTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs) - - -class LSUNChurchesValidation(LSUNBase): - def __init__(self, flip_p=0., **kwargs): - super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches", - flip_p=flip_p, **kwargs) - - -class LSUNBedroomsTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs) - - -class LSUNBedroomsValidation(LSUNBase): - def __init__(self, flip_p=0.0, **kwargs): - super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms", - flip_p=flip_p, **kwargs) - - -class LSUNCatsTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs) - - -class LSUNCatsValidation(LSUNBase): - def __init__(self, flip_p=0., **kwargs): - super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats", - flip_p=flip_p, **kwargs) diff --git a/spaces/awacke1/MultiRhymeLyricSmith/rhyme-with-ai/rhyme.py b/spaces/awacke1/MultiRhymeLyricSmith/rhyme-with-ai/rhyme.py deleted file mode 100644 
index db5d6655768eda8b4545af452eb3cb3d6be9c05a..0000000000000000000000000000000000000000 --- a/spaces/awacke1/MultiRhymeLyricSmith/rhyme-with-ai/rhyme.py +++ /dev/null @@ -1,64 +0,0 @@ -import functools -import random -from typing import List, Optional - -import requests -from gazpacho import Soup, get - -from rhyme_with_ai.utils import find_last_word - - -def query_rhyme_words(sentence: str, n_rhymes: int, language:str="english") -> List[str]: - """Returns a list of rhyme words for a sentence. - Parameters - ---------- - sentence : Sentence that may end with punctuation - n_rhymes : Maximum number of rhymes to return - Returns - ------- - List[str] -- List of words that rhyme with the final word - """ - last_word = find_last_word(sentence) - if language == "english": - return query_datamuse_api(last_word, n_rhymes) - elif language == "dutch": - return mick_rijmwoordenboek(last_word, n_rhymes) - else: - raise NotImplementedError(f"Unsupported language ({language}) expected 'english' or 'dutch'.") - - -def query_datamuse_api(word: str, n_rhymes: Optional[int] = None) -> List[str]: - """Query the DataMuse API. - Parameters - ---------- - word : Word to rhyme with - n_rhymes : Max rhymes to return - Returns - ------- - Rhyme words - """ - out = requests.get( - "https://api.datamuse.com/words", params={"rel_rhy": word} - ).json() - words = [_["word"] for _ in out] - if n_rhymes is None: - return words - return words[:n_rhymes] - - -@functools.lru_cache(maxsize=128, typed=False) -def mick_rijmwoordenboek(word: str, n_words: int): - url = f"https://rijmwoordenboek.nl/rijm/{word}" - html = get(url) - soup = Soup(html) - - results = soup.find("div", {"id": "rhymeResultsWords"}).html.split("
") - - # clean up - results = [r.replace("\n", "").replace(" ", "") for r in results] - - # filter html and empty strings - results = [r for r in results if ("<" not in r) and (len(r) > 0)] - - return random.sample(results, min(len(results), n_words)) - diff --git a/spaces/awacke1/PDFViewerwithUpdatesWorkBench/app.py b/spaces/awacke1/PDFViewerwithUpdatesWorkBench/app.py deleted file mode 100644 index bb37be37a7023ecc0313ee3615a9e36f91848cca..0000000000000000000000000000000000000000 --- a/spaces/awacke1/PDFViewerwithUpdatesWorkBench/app.py +++ /dev/null @@ -1,13 +0,0 @@ -#import fitz -import streamlit as st - -uploaded_pdf = st.file_uploader("Load pdf: ", type=['pdf']) - -def show_pdf(file_path): - with open(file_path,"rb") as f: - base64_pdf = base64.b64encode(f.read()).decode('utf-8') - pdf_display = f'' - st.markdown(pdf_display, unsafe_allow_html=True) - -if uploaded_pdf is not None: - show_pdf(uploaded_pdf) diff --git a/spaces/awacke1/VizLib-TopLargeHospitalsNewJersey-03-09-2023/app.py b/spaces/awacke1/VizLib-TopLargeHospitalsNewJersey-03-09-2023/app.py deleted file mode 100644 index 05adfa181088800fc3ff4f4847de72688e4fe5a5..0000000000000000000000000000000000000000 --- a/spaces/awacke1/VizLib-TopLargeHospitalsNewJersey-03-09-2023/app.py +++ /dev/null @@ -1,58 +0,0 @@ -import streamlit as st -import graphviz as gv -from graphviz import Graph -import folium -from streamlit_folium import folium_static - -# Define the cluster relations graph using gvmap -g = Graph(format='svg') -g.graph_attr['bgcolor'] = '#FFFFFF' -g.graph_attr['outputorder'] = 'edgesfirst' -g.graph_attr['size'] = '10,10' -g.node_attr['style'] = 'filled' -g.node_attr['shape'] = 'box' -g.node_attr['fillcolor'] = '#FFDAB9' - -with g.subgraph(name='cluster_NJ') as c: - c.graph_attr['bgcolor'] = '#ADD8E6' - c.node_attr['color'] = '#000000' - c.node_attr['fontcolor'] = '#000000' - c.attr(label='New Jersey', fontsize='24') - c.node('Hackensack Meridian Health', URL='https://www.hackensackmeridianhealth.org/', target='_blank', tooltip='Hackensack Meridian Health: Hackensack University Medical Center') - c.node('RWJBarnabas Health', URL='https://www.rwjbh.org/', target='_blank', tooltip='RWJBarnabas Health: Robert Wood Johnson University Hospital') - c.node('Atlantic Health System', URL='https://www.atlantichealth.org/', target='_blank', tooltip='Atlantic Health System: Morristown Medical Center') - c.node('Virtua Health', URL='https://www.virtua.org/', target='_blank', tooltip='Virtua Health: Virtua Memorial Hospital') - c.node('Inspira Health', URL='https://www.inspirahealthnetwork.org/', target='_blank', tooltip='Inspira Health: Inspira Medical Center Vineland') - c.node('Cooper University Health Care', URL='https://www.cooperhealth.org/', target='_blank', tooltip='Cooper University Health Care: Cooper University Hospital') - c.node('University Hospital', URL='https://www.uhnj.org/', target='_blank', tooltip='University Hospital: University Hospital') - c.node('Robert Wood Johnson University Hospital Hamilton', URL='https://www.rwjbh.org/robert-wood-johnson-university-hospital-hamilton/', target='_blank', tooltip='Robert Wood Johnson University Hospital Hamilton: Robert Wood Johnson University Hospital Hamilton') - c.node('Trinitas Regional Medical Center', URL='https://www.trinitasrmc.org/', target='_blank', tooltip='Trinitas Regional Medical Center: Trinitas Regional Medical Center') - c.node('Capital Health Regional Medical Center', URL='https://www.capitalhealth.org/', target='_blank', tooltip='Capital Health Regional 
Medical Center: Capital Health Regional Medical Center') - -# Render the graph using streamlit -st.graphviz_chart(g) - -# Define hospitals data -hospitals = [('Hackensack Meridian Health', 'Hackensack University Medical Center', 40.899886, -74.039179), - ('RWJBarnabas Health', 'Robert Wood Johnson University Hospital', 40.491301, -74.450611), - ('Atlantic Health System', 'Morristown Medical Center', 40.787231, -74.473851), - ('Virtua Health', 'Virtua Memorial Hospital', 39.931229, -75.025831), - ('Inspira Health', 'Inspira Medical Center Vineland', 39.460225, -75.035542), - ('Cooper University Health Care', 'Cooper University Hospital', 39.942743, -75.119090), - ('University Hospital', 'University Hospital', 40.742310, -74.177609), - ('Robert Wood Johnson University Hospital Hamilton', 'Robert Wood Johnson University Hospital Hamilton', 40.214008, -74.679619), - ('Trinitas Regional Medical Center', 'Trinitas Regional Medical Center', 40.661474, -74.215013), - ('Capital Health Regional Medical Center', 'Capital Health Regional Medical Center', 40.266778, -74.796452)] - -#Create a map centered on New Jersey -m = folium.Map(location=[40.0583, -74.4057], zoom_start=8) - -#Add markers for each hospital -for hospital in hospitals: - folium.Marker( - location=[hospital[2], hospital[3]], - popup=f'{hospital[1]}
{hospital[2]},{hospital[3]}' - ).add_to(m) - -#Display the map in Streamlit -folium_static(m) diff --git a/spaces/axuint/OpenNiji/app.py b/spaces/axuint/OpenNiji/app.py deleted file mode 100644 index 77e77186169c96b041fd9e4a5588642dd219bf73..0000000000000000000000000000000000000000 --- a/spaces/axuint/OpenNiji/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/Korakoe/OpenNiji").launch() \ No newline at end of file diff --git a/spaces/ayapoooooo123/openai-jukebox-1b-lyrics/README.md b/spaces/ayapoooooo123/openai-jukebox-1b-lyrics/README.md deleted file mode 100644 index 7513f56e1c10332f93d220dc609e4c6c497afdb3..0000000000000000000000000000000000000000 --- a/spaces/ayapoooooo123/openai-jukebox-1b-lyrics/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Openai Jukebox 1b Lyrics -emoji: 💻 -colorFrom: red -colorTo: blue -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false -license: bigscience-bloom-rail-1.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/banana-projects/web3d/node_modules/three/src/materials/LineBasicMaterial.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/materials/LineBasicMaterial.d.ts deleted file mode 100644 index f4c56c18feacd1e90e818000cb010cef58eebcdd..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/materials/LineBasicMaterial.d.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { Color } from './../math/Color'; -import { MaterialParameters, Material } from './Material'; - -export interface LineBasicMaterialParameters extends MaterialParameters { - color?: Color | string | number; - linewidth?: number; - linecap?: string; - linejoin?: string; -} - -export class LineBasicMaterial extends Material { - constructor(parameters?: LineBasicMaterialParameters); - - color: Color; - linewidth: number; - linecap: string; - linejoin: string; - - setValues(parameters: LineBasicMaterialParameters): void; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/math/Interpolant.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/math/Interpolant.d.ts deleted file mode 100644 index 1d3301a03086d6c19c72d5a8b713b3ce124a819b..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/math/Interpolant.d.ts +++ /dev/null @@ -1,15 +0,0 @@ -export abstract class Interpolant { - constructor( - parameterPositions: any, - samplesValues: any, - sampleSize: number, - resultBuffer?: any - ); - - parameterPositions: any; - samplesValues: any; - valueSize: number; - resultBuffer: any; - - evaluate(time: number): any; -} diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195556.py b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195556.py deleted file mode 100644 index 87c94b94347da6ee77c6686922500ed33abf5eaf..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327195556.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -os.system("pip install gfpgan") - -#os.system("pip freeze") -#os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .") -import random -import gradio as gr -from PIL import Image -import torch -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg') -# 
torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 'Henry.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg') - - -import cv2 -import glob -import numpy as np -from basicsr.utils import imwrite -from gfpgan import GFPGANer - -bg_upsampler = None - - - -# set up GFPGAN restorer -restorer = GFPGANer( - model_path='experiments/pretrained_models/GFPGANv1.3.pth', - upscale=2, - arch='clean', - channel_multiplier=2, - bg_upsampler=bg_upsampler) - - -def inference(img): - input_img = cv2.imread(img, cv2.IMREAD_COLOR) - cropped_faces, restored_faces, restored_img = restorer.enhance( - input_img, has_aligned=False, only_center_face=False, paste_back=True) - - #return Image.fromarray(restored_faces[0][:,:,::-1]) - return Image.fromarray(restored_img[:, :, ::-1]) - -title = "让美好回忆更清晰" - - -description = "上传老照片,点击Submit,稍等片刻,右侧Output将照片另存为即可。" -article = "

Github Repo | visitor badge
" - -gr.Interface( - inference, - [gr.inputs.Image(type="filepath", label="Input")], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['lincoln.jpg'], - ['einstein.png'], - ['edison.jpg'], - ['Henry.jpg'], - ['Frida.jpg'] - ] - ).launch(enable_queue=True,cache_examples=True,share=True) - - diff --git a/spaces/beki/pii-anonymizer/spacy_recognizer.py b/spaces/beki/pii-anonymizer/spacy_recognizer.py deleted file mode 100644 index 438784bff558b160afeb6c781d81944fd3dd2efe..0000000000000000000000000000000000000000 --- a/spaces/beki/pii-anonymizer/spacy_recognizer.py +++ /dev/null @@ -1,131 +0,0 @@ -import logging -from typing import Optional, List, Tuple, Set - -from presidio_analyzer import ( - RecognizerResult, - LocalRecognizer, - AnalysisExplanation, -) -from presidio_analyzer.nlp_engine import NlpArtifacts -from presidio_analyzer.predefined_recognizers.spacy_recognizer import SpacyRecognizer - -logger = logging.getLogger("presidio-analyzer") - - -class CustomSpacyRecognizer(LocalRecognizer): - - ENTITIES = [ - "LOCATION", - "PERSON", - "NRP", - "ORGANIZATION", - "DATE_TIME", - ] - - DEFAULT_EXPLANATION = "Identified as {} by Spacy's Named Entity Recognition (Privy-trained)" - - CHECK_LABEL_GROUPS = [ - ({"LOCATION"}, {"LOC", "LOCATION", "STREET_ADDRESS", "COORDINATE"}), - ({"PERSON"}, {"PER", "PERSON"}), - ({"NRP"}, {"NORP", "NRP"}), - ({"ORGANIZATION"}, {"ORG"}), - ({"DATE_TIME"}, {"DATE_TIME"}), - ] - - MODEL_LANGUAGES = { - "en": "beki/en_spacy_pii_distilbert", - } - - PRESIDIO_EQUIVALENCES = { - "PER": "PERSON", - "LOC": "LOCATION", - "ORG": "ORGANIZATION", - "NROP": "NRP", - "DATE_TIME": "DATE_TIME", - } - - def __init__( - self, - supported_language: str = "en", - supported_entities: Optional[List[str]] = None, - check_label_groups: Optional[Tuple[Set, Set]] = None, - context: Optional[List[str]] = None, - ner_strength: float = 0.85, - ): - self.ner_strength = ner_strength - self.check_label_groups = ( - check_label_groups if check_label_groups else self.CHECK_LABEL_GROUPS - ) - supported_entities = supported_entities if supported_entities else self.ENTITIES - super().__init__( - supported_entities=supported_entities, - supported_language=supported_language, - ) - - def load(self) -> None: - """Load the model, not used. Model is loaded during initialization.""" - pass - - def get_supported_entities(self) -> List[str]: - """ - Return supported entities by this model. - :return: List of the supported entities. - """ - return self.supported_entities - - def build_spacy_explanation( - self, original_score: float, explanation: str - ) -> AnalysisExplanation: - """ - Create explanation for why this result was detected. 
- :param original_score: Score given by this recognizer - :param explanation: Explanation string - :return: - """ - explanation = AnalysisExplanation( - recognizer=self.__class__.__name__, - original_score=original_score, - textual_explanation=explanation, - ) - return explanation - - def analyze(self, text, entities, nlp_artifacts=None): # noqa D102 - results = [] - if not nlp_artifacts: - logger.warning("Skipping SpaCy, nlp artifacts not provided...") - return results - - ner_entities = nlp_artifacts.entities - - for entity in entities: - if entity not in self.supported_entities: - continue - for ent in ner_entities: - if not self.__check_label(entity, ent.label_, self.check_label_groups): - continue - textual_explanation = self.DEFAULT_EXPLANATION.format( - ent.label_) - explanation = self.build_spacy_explanation( - self.ner_strength, textual_explanation - ) - spacy_result = RecognizerResult( - entity_type=entity, - start=ent.start_char, - end=ent.end_char, - score=self.ner_strength, - analysis_explanation=explanation, - recognition_metadata={ - RecognizerResult.RECOGNIZER_NAME_KEY: self.name - }, - ) - results.append(spacy_result) - - return results - - @staticmethod - def __check_label( - entity: str, label: str, check_label_groups: Tuple[Set, Set] - ) -> bool: - return any( - [entity in egrp and label in lgrp for egrp, lgrp in check_label_groups] - ) diff --git a/spaces/beomi/KoRWKV-1.5B/app.py b/spaces/beomi/KoRWKV-1.5B/app.py deleted file mode 100644 index a9cf1c76ab28dbeb76f37ca37bc3111335d560ba..0000000000000000000000000000000000000000 --- a/spaces/beomi/KoRWKV-1.5B/app.py +++ /dev/null @@ -1,93 +0,0 @@ -from threading import Thread - -import torch -import gradio as gr -from transformers import AutoTokenizer, RwkvForCausalLM, TextIteratorStreamer - -model_id = "beomi/KoRWKV-1.5B" -torch_device = "cuda" if torch.cuda.is_available() else "cpu" -print("Running on device:", torch_device) -print("CPU threads:", torch.get_num_threads()) - - -if torch_device == "cuda": - model = RwkvForCausalLM.from_pretrained(model_id, device_map="auto") -else: - model = RwkvForCausalLM.from_pretrained(model_id) -tokenizer = AutoTokenizer.from_pretrained(model_id) - - -def run_generation(user_text, top_p, temperature, max_new_tokens): - # Get the model and tokenizer, and tokenize the user text. - user_text = user_text.strip() - model_inputs = tokenizer([user_text], return_tensors="pt", return_token_type_ids=False).to(torch_device) - - if len(user_text) > 100: - skip_prompt = True - else: - skip_prompt = False - - # Start generation on a separate thread, so that we don't block the UI. The text is pulled from the streamer - # in the main thread. Adds timeout to the streamer to handle exceptions in the generation thread. - streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=skip_prompt, skip_special_tokens=False) - generate_kwargs = dict( - model_inputs, - streamer=streamer, - max_new_tokens=max_new_tokens, - do_sample=True, - top_p=top_p, - temperature=float(temperature), - # repetition_penalty=0.5, - # no_repeat_ngram_size=6, - ) - t = Thread(target=model.generate, kwargs=generate_kwargs) - t.start() - - # Pull the generated text from the streamer, and update the model output. 
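-    # TextIteratorStreamer is itself an iterator: each step blocks until generate(), running on
-    # the background thread started above, pushes the next decoded chunk (or the 10s timeout
-    # fires), so yielding below streams partial output to the Gradio textbox as it is produced.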
- model_output = "" - for new_text in streamer: - model_output += new_text - yield model_output - return model_output - - -def reset_textbox(): - return gr.update(value='') - - -with gr.Blocks() as demo: - duplicate_link = "https://huggingface.co/spaces/beomi/KoRWKV-1.5B?duplicate=true" - gr.Markdown( - "# 🤗 KoRWKV-1.5B 🔥Streaming🔥 on Gradio\n" - "This demo showcases the use of the " - "[streaming feature](https://huggingface.co/docs/transformers/main/en/generation_strategies#streaming) " - "of 🤗 Transformers with Gradio to generate text in real-time. It uses " - f"[{model_id}](https://huggingface.co/{model_id}) and the Spaces free compute tier.\n\n" - f"Feel free to [duplicate this Space]({duplicate_link}) to try your own models or use this space as a " - "template! 💛" - ) - - with gr.Row(): - with gr.Column(scale=4): - user_text = gr.Textbox( - placeholder="여기에 문장의 시작부분을 입력해주세요...", - label="User input" - ) - model_output = gr.Textbox(label="Model output", lines=10, interactive=False) - button_submit = gr.Button(value="Submit") - - with gr.Column(scale=1): - max_new_tokens = gr.Slider( - minimum=1, maximum=512, value=250, step=1, interactive=True, label="Max New Tokens", - ) - top_p = gr.Slider( - minimum=0.05, maximum=1.0, value=1, step=0.05, interactive=True, label="Top-p (nucleus sampling)", - ) - temperature = gr.Slider( - minimum=0.1, maximum=2.0, value=1, step=0.1, interactive=True, label="Temperature", - ) - - user_text.submit(run_generation, [user_text, top_p, temperature, max_new_tokens], model_output) - button_submit.click(run_generation, [user_text, top_p, temperature, max_new_tokens], model_output) - - demo.queue(max_size=32).launch(enable_queue=True) diff --git a/spaces/bhandsab/meta-llama-Llama-2-70b-chat/README.md b/spaces/bhandsab/meta-llama-Llama-2-70b-chat/README.md deleted file mode 100644 index 5e1fde61df277a4cc08e0bb01046a46fab174138..0000000000000000000000000000000000000000 --- a/spaces/bhandsab/meta-llama-Llama-2-70b-chat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Meta Llama Llama 2 70b Chat -emoji: 🐨 -colorFrom: green -colorTo: yellow -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bioriAsaeru/text-to-voice/((HOT)) Adobe Audition CC 2020 V13.0.3.60 With Crack (x64) Latest The Best Deal You Can Find Online.md b/spaces/bioriAsaeru/text-to-voice/((HOT)) Adobe Audition CC 2020 V13.0.3.60 With Crack (x64) Latest The Best Deal You Can Find Online.md deleted file mode 100644 index 30d794813223878e0368e154255f9077cf9e6a1f..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/((HOT)) Adobe Audition CC 2020 V13.0.3.60 With Crack (x64) Latest The Best Deal You Can Find Online.md +++ /dev/null @@ -1,6 +0,0 @@ -

((HOT)) Adobe Audition CC 2020 V13.0.3.60 With Crack (x64) Latest


DOWNLOAD >>>>> https://urloso.com/2uyPyr



- - aaccfb2cb3
-
-
-

diff --git a/spaces/bioriAsaeru/text-to-voice/IntelliJIDEACracklicensekeywithtorrent100WorkingfreeDownload The Ultimate Guide for Java and Kotlin Developers.md b/spaces/bioriAsaeru/text-to-voice/IntelliJIDEACracklicensekeywithtorrent100WorkingfreeDownload The Ultimate Guide for Java and Kotlin Developers.md deleted file mode 100644 index 3877d5c9e4b34b778146cc2a87ffc036189b749e..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/IntelliJIDEACracklicensekeywithtorrent100WorkingfreeDownload The Ultimate Guide for Java and Kotlin Developers.md +++ /dev/null @@ -1,6 +0,0 @@ -

IntelliJIDEACracklicensekeywithtorrent100WorkingfreeDownload


Download Zip ✶✶✶ https://urloso.com/2uyPqH



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/bonrix/text_detection_easyocr/README.md b/spaces/bonrix/text_detection_easyocr/README.md deleted file mode 100644 index 3022715bf8d6b895e9fc15d6cb7cbff14a95ec6a..0000000000000000000000000000000000000000 --- a/spaces/bonrix/text_detection_easyocr/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text Detection Easyocr -emoji: 🌍 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DeepLab/deeplab/build_solver.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DeepLab/deeplab/build_solver.py deleted file mode 100644 index a1d359c2c35baf75a835879bb4b4f902be235179..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DeepLab/deeplab/build_solver.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import torch - -from detectron2.config import CfgNode -from detectron2.solver import LRScheduler -from detectron2.solver import build_lr_scheduler as build_d2_lr_scheduler - -from .lr_scheduler import WarmupPolyLR - - -def build_lr_scheduler(cfg: CfgNode, optimizer: torch.optim.Optimizer) -> LRScheduler: - """ - Build a LR scheduler from config. - """ - name = cfg.SOLVER.LR_SCHEDULER_NAME - if name == "WarmupPolyLR": - return WarmupPolyLR( - optimizer, - cfg.SOLVER.MAX_ITER, - warmup_factor=cfg.SOLVER.WARMUP_FACTOR, - warmup_iters=cfg.SOLVER.WARMUP_ITERS, - warmup_method=cfg.SOLVER.WARMUP_METHOD, - power=cfg.SOLVER.POLY_LR_POWER, - constant_ending=cfg.SOLVER.POLY_LR_CONSTANT_ENDING, - ) - else: - return build_d2_lr_scheduler(cfg, optimizer) diff --git a/spaces/camel-ai/camel-data-explorer/sync.sh b/spaces/camel-ai/camel-data-explorer/sync.sh deleted file mode 100644 index 7b6bc0fcff9800a24c73b2dc4a5e8b9305101059..0000000000000000000000000000000000000000 --- a/spaces/camel-ai/camel-data-explorer/sync.sh +++ /dev/null @@ -1,15 +0,0 @@ -TMP_DIR=/tmp/camel_hf_tmp -echo $TMP_DIR -HF_REPO_DIR=`realpath .` -echo $HF_REPO_DIR - -mkdir -p $TMP_DIR -git clone -b hf_spaces_2 https://github.com/lightaime/camel.git $TMP_DIR -cd $TMP_DIR - -find apps/data_explorer -name "*.py" | grep -v test | xargs -n 1 -I {} rsync -R {} $HF_REPO_DIR -find apps/common -name "*.py" | grep -v test | xargs -n 1 -I {} rsync -R {} $HF_REPO_DIR - -rm -rf $TMP_DIR - -echo Done diff --git a/spaces/camenduru-com/VITS-Umamusume-voice-synthesizer/monotonic_align/core.c b/spaces/camenduru-com/VITS-Umamusume-voice-synthesizer/monotonic_align/core.c deleted file mode 100644 index 5631d20a9a00db29e143a6e8e4e5c378d6bb850a..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/VITS-Umamusume-voice-synthesizer/monotonic_align/core.c +++ /dev/null @@ -1,21299 +0,0 @@ -/* Generated by Cython 0.29.21 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "name": "monotonic_align.core", - "sources": [ - "core.pyx" - ] - }, - "module_name": "monotonic_align.core" -} -END: Cython Metadata */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.6+ or Python 3.3+. 
-#else -#define CYTHON_ABI "0_29_21" -#define CYTHON_HEX_VERSION 0x001D15F0 -#define CYTHON_FUTURE_DIVISION 0 -#include -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #if PY_VERSION_HEX >= 0x02070000 - #define HAVE_LONG_LONG - #endif -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#ifdef PYPY_VERSION - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(PYSTON_VERSION) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_PYSTON 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - 
#define CYTHON_COMPILING_IN_PYSTON 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #if PY_VERSION_HEX < 0x02070000 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #elif !defined(CYTHON_USE_PYLONG_INTERNALS) - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) - #endif - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) - #endif - #ifndef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) - #endif - #ifndef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if CYTHON_USE_PYLONG_INTERNALS - #include "longintrepr.h" - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR -# if defined(__cplusplus) - template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } -# else -# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define 
__Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int32 uint32_t; - #endif - #endif -#else - #include -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) && __cplusplus >= 201103L - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if defined(__clang__ ) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 -#endif -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) - #define __Pyx_DefaultClassType PyClass_Type -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" -#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif - #define __Pyx_DefaultClassType PyType_Type -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords -#endif -#if CYTHON_FAST_PYCCALL -#define __Pyx_PyFastCFunction_Check(func)\ - ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | 
METH_STACKLESS))))) -#else -#define __Pyx_PyFastCFunction_Check(func) 0 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 - #define PyMem_RawMalloc(n) PyMem_Malloc(n) - #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) - #define PyMem_RawFree(p) PyMem_Free(p) -#endif -#if CYTHON_COMPILING_IN_PYSTON - #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -#else -#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) -#endif -#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) -#else - #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__monotonic_align__core -#define __PYX_HAVE_API__monotonic_align__core -/* Early includes */ -#include "pythread.h" -#include -#include -#include -#include "pystate.h" -#ifdef _OPENMP -#include -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - 
#define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? -value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ 
-static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -static PyObject *__pyx_m = NULL; -static PyObject *__pyx_d; -static PyObject *__pyx_b; -static PyObject *__pyx_cython_runtime = NULL; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static PyObject *__pyx_empty_unicode; -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm= __FILE__; -static const char *__pyx_filename; - - -static const char *__pyx_f[] = { - "core.pyx", - "stringsource", -}; -/* NoFastGil.proto */ -#define __Pyx_PyGILState_Ensure PyGILState_Ensure -#define __Pyx_PyGILState_Release PyGILState_Release -#define __Pyx_FastGIL_Remember() -#define __Pyx_FastGIL_Forget() -#define __Pyx_FastGilFuncInit() - -/* MemviewSliceStruct.proto */ -struct __pyx_memoryview_obj; -typedef struct { - struct __pyx_memoryview_obj *memview; - char *data; - Py_ssize_t shape[8]; - Py_ssize_t strides[8]; - Py_ssize_t suboffsets[8]; -} __Pyx_memviewslice; -#define __Pyx_MemoryView_Len(m) (m.shape[0]) - -/* Atomics.proto */ -#include <pythread.h> -#ifndef CYTHON_ATOMICS - #define CYTHON_ATOMICS 1 -#endif -#define __pyx_atomic_int_type int -#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ - (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ - !defined(__i386__) - #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) - #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) - #ifdef __PYX_DEBUG_ATOMICS - #warning "Using GNU atomics" - #endif -#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 - #include <intrin.h> - #undef __pyx_atomic_int_type - #define __pyx_atomic_int_type LONG - #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) - #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) - #ifdef __PYX_DEBUG_ATOMICS - #pragma message ("Using MSVC atomics") - #endif -#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 - #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) - #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) - #ifdef __PYX_DEBUG_ATOMICS - #warning "Using Intel atomics" - #endif -#else - #undef CYTHON_ATOMICS - #define CYTHON_ATOMICS 0 - #ifdef __PYX_DEBUG_ATOMICS - #warning "Not using atomics" - #endif -#endif -typedef volatile __pyx_atomic_int_type __pyx_atomic_int; -#if CYTHON_ATOMICS - #define __pyx_add_acquisition_count(memview)\ - __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) - #define __pyx_sub_acquisition_count(memview)\ - __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) -#else - #define __pyx_add_acquisition_count(memview)\ - __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) - #define __pyx_sub_acquisition_count(memview)\ - __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) -#endif - -/* ForceInitThreads.proto */ -#ifndef __PYX_FORCE_INIT_THREADS - #define __PYX_FORCE_INIT_THREADS 0 -#endif - -/* BufferFormatStructs.proto */ -#define IS_UNSIGNED(type) (((type) -1) > 0) -struct __Pyx_StructField_; -#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) -typedef struct { - const char* name; - struct __Pyx_StructField_* fields; - size_t size; - size_t arraysize[8]; - int ndim; - char typegroup; - char is_unsigned; - int flags; -} __Pyx_TypeInfo; -typedef struct __Pyx_StructField_ { - __Pyx_TypeInfo* type; - const char* name; - size_t offset; -} 
__Pyx_StructField; -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - size_t struct_alignment; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; - char is_valid_array; -} __Pyx_BufFmt_Context; - - -/*--- Type declarations ---*/ -struct __pyx_array_obj; -struct __pyx_MemviewEnum_obj; -struct __pyx_memoryview_obj; -struct __pyx_memoryviewslice_obj; -struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; - -/* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ -struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { - int __pyx_n; - float max_neg_val; -}; - -/* "View.MemoryView":105 - * - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ -struct __pyx_array_obj { - PyObject_HEAD - struct __pyx_vtabstruct_array *__pyx_vtab; - char *data; - Py_ssize_t len; - char *format; - int ndim; - Py_ssize_t *_shape; - Py_ssize_t *_strides; - Py_ssize_t itemsize; - PyObject *mode; - PyObject *_format; - void (*callback_free_data)(void *); - int free_data; - int dtype_is_object; -}; - - -/* "View.MemoryView":279 - * - * @cname('__pyx_MemviewEnum') - * cdef class Enum(object): # <<<<<<<<<<<<<< - * cdef object name - * def __init__(self, name): - */ -struct __pyx_MemviewEnum_obj { - PyObject_HEAD - PyObject *name; -}; - - -/* "View.MemoryView":330 - * - * @cname('__pyx_memoryview') - * cdef class memoryview(object): # <<<<<<<<<<<<<< - * - * cdef object obj - */ -struct __pyx_memoryview_obj { - PyObject_HEAD - struct __pyx_vtabstruct_memoryview *__pyx_vtab; - PyObject *obj; - PyObject *_size; - PyObject *_array_interface; - PyThread_type_lock lock; - __pyx_atomic_int acquisition_count[2]; - __pyx_atomic_int *acquisition_count_aligned_p; - Py_buffer view; - int flags; - int dtype_is_object; - __Pyx_TypeInfo *typeinfo; -}; - - -/* "View.MemoryView":965 - * - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ -struct __pyx_memoryviewslice_obj { - struct __pyx_memoryview_obj __pyx_base; - __Pyx_memviewslice from_slice; - PyObject *from_object; - PyObject *(*to_object_func)(char *); - int (*to_dtype_func)(char *, PyObject *); -}; - - - -/* "View.MemoryView":105 - * - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ - -struct __pyx_vtabstruct_array { - PyObject *(*get_memview)(struct __pyx_array_obj *); -}; -static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; - - -/* "View.MemoryView":330 - * - * @cname('__pyx_memoryview') - * cdef class memoryview(object): # <<<<<<<<<<<<<< - * - * cdef object obj - */ - -struct __pyx_vtabstruct_memoryview { - char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject 
*(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); - PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); -}; -static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; - - -/* "View.MemoryView":965 - * - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ - -struct __pyx_vtabstruct__memoryviewslice { - struct __pyx_vtabstruct_memoryview __pyx_base; -}; -static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* MemviewSliceInit.proto */ -#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d -#define __Pyx_MEMVIEW_DIRECT 1 -#define __Pyx_MEMVIEW_PTR 2 -#define __Pyx_MEMVIEW_FULL 4 -#define __Pyx_MEMVIEW_CONTIG 8 -#define __Pyx_MEMVIEW_STRIDED 16 -#define __Pyx_MEMVIEW_FOLLOW 32 -#define __Pyx_IS_C_CONTIG 1 -#define __Pyx_IS_F_CONTIG 2 -static int __Pyx_init_memviewslice( - struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference); -static CYTHON_INLINE int __pyx_add_acquisition_count_locked( - __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); -static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( - __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); -#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) -#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) -#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) -#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) -static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); -static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ - const char* function_name); - -/* None.proto */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); - -/* ArgTypeTest.proto */ -#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ - ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ - __Pyx__ArgTypeTest(obj, type, name, exact)) -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* PyCFunctionFastCall.proto */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); -#else -#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) -#endif - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#else -#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - 
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif - -/* PyObjectCall2Args.proto */ -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* IncludeStringH.proto */ -#include <string.h> - -/* BytesEquals.proto */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); - -/* UnicodeEquals.proto */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); - -/* StrEquals.proto */ -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals -#else -#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals -#endif - -/* None.proto */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); - -/* UnaryNegOverflows.proto */ -#define UNARY_NEG_WOULD_OVERFLOW(x)\ - (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) - -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ -/* GetAttr.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* decode_c_string_utf16.proto */ -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = 0; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = -1; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = 1; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} - -/* decode_c_string.proto */ -static CYTHON_INLINE PyObject* __Pyx_decode_c_string( - const char* cstring, Py_ssize_t start, Py_ssize_t stop, - const char* encoding, const char* errors, - PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* GetAttr3.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static 
CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* RaiseTooManyValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/* RaiseNeedMoreValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/* RaiseNoneIterError.proto */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -/* ExtTypeTest.proto */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* SwapException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static CYTHON_INLINE void 
__Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -/* ListCompAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len)) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* ListExtend.proto */ -static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { -#if CYTHON_COMPILING_IN_CPYTHON - PyObject* none = _PyList_Extend((PyListObject*)L, v); - if (unlikely(!none)) - return -1; - Py_DECREF(none); - return 0; -#else - return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); -#endif -} - -/* ListAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) -#endif - -/* None.proto */ -static CYTHON_INLINE long __Pyx_div_long(long, long); - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* HasAttr.proto */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); - -/* PyObject_GenericGetAttrNoDict.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr -#endif - -/* PyObject_GenericGetAttr.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr -#endif - -/* SetVTable.proto */ -static int __Pyx_SetVtable(PyObject *dict, void *vtable); - -/* PyObjectGetAttrStrNoError.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); - -/* SetupReduce.proto */ -static int __Pyx_setup_reduce(PyObject* type_obj); - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -#if PY_MAJOR_VERSION < 3 - static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); - static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else - #define __Pyx_GetBuffer PyObject_GetBuffer - #define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - - -/* BufferStructDeclare.proto */ -typedef struct { - Py_ssize_t shape, strides, suboffsets; -} __Pyx_Buf_DimInfo; -typedef struct { - size_t refcount; - Py_buffer pybuffer; -} __Pyx_Buffer; -typedef struct { - __Pyx_Buffer *rcbuffer; - char *data; - __Pyx_Buf_DimInfo diminfo[8]; -} __Pyx_LocalBuf_ND; - -/* MemviewSliceIsContig.proto */ -static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); - -/* OverlappingSlices.proto */ -static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize); - -/* Capsule.proto */ -static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); - -/* IsLittleEndian.proto */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); - -/* BufferFormatCheck.proto */ -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type); - -/* TypeInfoCompare.proto */ -static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); - -/* MemviewSliceValidateAndInit.proto */ -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* MemviewSliceCopyTemplate.proto */ -static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject 
*); - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ - -/* Module declarations from 'cython.view' */ - -/* Module declarations from 'cython' */ - -/* Module declarations from 'monotonic_align.core' */ -static PyTypeObject *__pyx_array_type = 0; -static PyTypeObject *__pyx_MemviewEnum_type = 0; -static PyTypeObject *__pyx_memoryview_type = 0; -static PyTypeObject *__pyx_memoryviewslice_type = 0; -static PyObject *generic = 0; -static PyObject *strided = 0; -static PyObject *indirect = 0; -static PyObject *contiguous = 0; -static PyObject *indirect_contiguous = 0; -static int __pyx_memoryview_thread_locks_used; -static PyThread_type_lock __pyx_memoryview_thread_locks[8]; -static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ -static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ -static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ -static void *__pyx_align_pointer(void *, size_t); /*proto*/ -static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ -static PyObject *_unellipsify(PyObject *, int); /*proto*/ -static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ -static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memslice_transpose(__Pyx_memviewslice *); 
/*proto*/ -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ -static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ -static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ -static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ -static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ -static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ -static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ -static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; -#define __Pyx_MODULE_NAME "monotonic_align.core" -extern int __pyx_module_is_main_monotonic_align__core; -int __pyx_module_is_main_monotonic_align__core = 0; - -/* Implementation of 'monotonic_align.core' */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_MemoryError; -static PyObject *__pyx_builtin_enumerate; -static PyObject *__pyx_builtin_TypeError; -static PyObject *__pyx_builtin_Ellipsis; -static PyObject *__pyx_builtin_id; -static PyObject *__pyx_builtin_IndexError; -static const char __pyx_k_O[] = "O"; -static const char __pyx_k_c[] = "c"; -static const char __pyx_k_id[] = "id"; -static const char __pyx_k_new[] = "__new__"; -static const char __pyx_k_obj[] = "obj"; -static const char __pyx_k_base[] = "base"; -static const char __pyx_k_dict[] = "__dict__"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_mode[] = "mode"; -static const char __pyx_k_name[] = "name"; -static const char __pyx_k_ndim[] = "ndim"; -static const char __pyx_k_pack[] = "pack"; -static const char __pyx_k_size[] = "size"; -static const char __pyx_k_step[] = "step"; -static const char __pyx_k_stop[] = "stop"; -static const char __pyx_k_t_xs[] = "t_xs"; -static const char __pyx_k_t_ys[] = "t_ys"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_ASCII[] = "ASCII"; -static const char __pyx_k_class[] = "__class__"; -static const char __pyx_k_error[] = "error"; -static const char __pyx_k_flags[] = "flags"; -static const char __pyx_k_paths[] = "paths"; -static const char __pyx_k_range[] = "range"; -static const char __pyx_k_shape[] = "shape"; -static const char __pyx_k_start[] = "start"; -static const char __pyx_k_encode[] = "encode"; -static const char __pyx_k_format[] = "format"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_name_2[] = "__name__"; -static const char __pyx_k_pickle[] = "pickle"; -static const char __pyx_k_reduce[] = "__reduce__"; -static const char __pyx_k_struct[] = "struct"; -static const char __pyx_k_unpack[] = "unpack"; -static const char __pyx_k_update[] = "update"; -static const char __pyx_k_values[] = "values"; -static const char __pyx_k_fortran[] = "fortran"; -static const char __pyx_k_memview[] = "memview"; -static const char __pyx_k_Ellipsis[] = "Ellipsis"; -static const char __pyx_k_getstate[] = "__getstate__"; -static const char __pyx_k_itemsize[] = "itemsize"; -static const char __pyx_k_pyx_type[] = "__pyx_type"; -static const char __pyx_k_setstate[] = "__setstate__"; -static const char __pyx_k_TypeError[] = "TypeError"; -static const char __pyx_k_enumerate[] = "enumerate"; -static const char __pyx_k_pyx_state[] = "__pyx_state"; -static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; -static const char __pyx_k_IndexError[] = "IndexError"; -static const char __pyx_k_ValueError[] = "ValueError"; -static const char __pyx_k_pyx_result[] = "__pyx_result"; -static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; -static const char __pyx_k_MemoryError[] = "MemoryError"; -static const char __pyx_k_PickleError[] = "PickleError"; -static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; -static const char __pyx_k_stringsource[] = "stringsource"; -static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; -static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; -static const char 
__pyx_k_View_MemoryView[] = "View.MemoryView"; -static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; -static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; -static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; -static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; -static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; -static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; -static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; -static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; -static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; -static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; -static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; -static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; -static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; -static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; -static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; -static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; -static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; -static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; -static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; -static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; -static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; -static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; -static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; -static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; -static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; -static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; -static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; -static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; -static PyObject *__pyx_n_s_ASCII; -static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; -static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; -static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; -static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; -static PyObject *__pyx_kp_s_Cannot_index_with_type_s; -static PyObject *__pyx_n_s_Ellipsis; -static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; -static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; -static PyObject *__pyx_n_s_IndexError; -static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; -static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; -static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; -static PyObject *__pyx_n_s_MemoryError; -static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; -static PyObject 
*__pyx_kp_s_MemoryView_of_r_object; -static PyObject *__pyx_n_b_O; -static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; -static PyObject *__pyx_n_s_PickleError; -static PyObject *__pyx_n_s_TypeError; -static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; -static PyObject *__pyx_n_s_ValueError; -static PyObject *__pyx_n_s_View_MemoryView; -static PyObject *__pyx_n_s_allocate_buffer; -static PyObject *__pyx_n_s_base; -static PyObject *__pyx_n_s_c; -static PyObject *__pyx_n_u_c; -static PyObject *__pyx_n_s_class; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_kp_s_contiguous_and_direct; -static PyObject *__pyx_kp_s_contiguous_and_indirect; -static PyObject *__pyx_n_s_dict; -static PyObject *__pyx_n_s_dtype_is_object; -static PyObject *__pyx_n_s_encode; -static PyObject *__pyx_n_s_enumerate; -static PyObject *__pyx_n_s_error; -static PyObject *__pyx_n_s_flags; -static PyObject *__pyx_n_s_format; -static PyObject *__pyx_n_s_fortran; -static PyObject *__pyx_n_u_fortran; -static PyObject *__pyx_n_s_getstate; -static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; -static PyObject *__pyx_n_s_id; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_itemsize; -static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_memview; -static PyObject *__pyx_n_s_mode; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_name_2; -static PyObject *__pyx_n_s_ndim; -static PyObject *__pyx_n_s_new; -static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; -static PyObject *__pyx_n_s_obj; -static PyObject *__pyx_n_s_pack; -static PyObject *__pyx_n_s_paths; -static PyObject *__pyx_n_s_pickle; -static PyObject *__pyx_n_s_pyx_PickleError; -static PyObject *__pyx_n_s_pyx_checksum; -static PyObject *__pyx_n_s_pyx_getbuffer; -static PyObject *__pyx_n_s_pyx_result; -static PyObject *__pyx_n_s_pyx_state; -static PyObject *__pyx_n_s_pyx_type; -static PyObject *__pyx_n_s_pyx_unpickle_Enum; -static PyObject *__pyx_n_s_pyx_vtable; -static PyObject *__pyx_n_s_range; -static PyObject *__pyx_n_s_reduce; -static PyObject *__pyx_n_s_reduce_cython; -static PyObject *__pyx_n_s_reduce_ex; -static PyObject *__pyx_n_s_setstate; -static PyObject *__pyx_n_s_setstate_cython; -static PyObject *__pyx_n_s_shape; -static PyObject *__pyx_n_s_size; -static PyObject *__pyx_n_s_start; -static PyObject *__pyx_n_s_step; -static PyObject *__pyx_n_s_stop; -static PyObject *__pyx_kp_s_strided_and_direct; -static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; -static PyObject *__pyx_kp_s_strided_and_indirect; -static PyObject *__pyx_kp_s_stringsource; -static PyObject *__pyx_n_s_struct; -static PyObject *__pyx_n_s_t_xs; -static PyObject *__pyx_n_s_t_ys; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_kp_s_unable_to_allocate_array_data; -static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; -static PyObject *__pyx_n_s_unpack; -static PyObject *__pyx_n_s_update; -static PyObject *__pyx_n_s_values; -static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ 
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_184977713; -static PyObject *__pyx_int_neg_1; -static float __pyx_k_; -static PyObject *__pyx_tuple__2; -static PyObject *__pyx_tuple__3; -static PyObject *__pyx_tuple__4; -static PyObject *__pyx_tuple__5; -static PyObject *__pyx_tuple__6; -static PyObject *__pyx_tuple__7; -static PyObject *__pyx_tuple__8; -static PyObject *__pyx_tuple__9; -static PyObject *__pyx_slice__16; -static 
PyObject *__pyx_tuple__10; -static PyObject *__pyx_tuple__11; -static PyObject *__pyx_tuple__12; -static PyObject *__pyx_tuple__13; -static PyObject *__pyx_tuple__14; -static PyObject *__pyx_tuple__15; -static PyObject *__pyx_tuple__17; -static PyObject *__pyx_tuple__18; -static PyObject *__pyx_tuple__19; -static PyObject *__pyx_tuple__20; -static PyObject *__pyx_tuple__21; -static PyObject *__pyx_tuple__22; -static PyObject *__pyx_tuple__23; -static PyObject *__pyx_tuple__24; -static PyObject *__pyx_tuple__25; -static PyObject *__pyx_codeobj__26; -/* Late includes */ - -/* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - -static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { - float __pyx_v_max_neg_val = __pyx_k_; - int __pyx_v_x; - int __pyx_v_y; - float __pyx_v_v_prev; - float __pyx_v_v_cur; - int __pyx_v_index; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - long __pyx_t_4; - int __pyx_t_5; - long __pyx_t_6; - long __pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - Py_ssize_t __pyx_t_10; - float __pyx_t_11; - float __pyx_t_12; - float __pyx_t_13; - int __pyx_t_14; - Py_ssize_t __pyx_t_15; - Py_ssize_t __pyx_t_16; - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; - } - } - - /* "monotonic_align/core.pyx":13 - * cdef float v_cur - * cdef float tmp - * cdef int index = t_x - 1 # <<<<<<<<<<<<<< - * - * for y in range(t_y): - */ - __pyx_v_index = (__pyx_v_t_x - 1); - - /* "monotonic_align/core.pyx":15 - * cdef int index = t_x - 1 - * - * for y in range(t_y): # <<<<<<<<<<<<<< - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: - */ - __pyx_t_1 = __pyx_v_t_y; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_y = __pyx_t_3; - - /* "monotonic_align/core.pyx":16 - * - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< - * if x == y: - * v_cur = max_neg_val - */ - __pyx_t_4 = (__pyx_v_y + 1); - __pyx_t_5 = __pyx_v_t_x; - if (((__pyx_t_4 < __pyx_t_5) != 0)) { - __pyx_t_6 = __pyx_t_4; - } else { - __pyx_t_6 = __pyx_t_5; - } - __pyx_t_4 = __pyx_t_6; - __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); - __pyx_t_6 = 0; - if (((__pyx_t_5 > __pyx_t_6) != 0)) { - __pyx_t_7 = __pyx_t_5; - } else { - __pyx_t_7 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_4; - for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { - __pyx_v_x = __pyx_t_5; - - /* "monotonic_align/core.pyx":17 - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: # <<<<<<<<<<<<<< - * v_cur = max_neg_val - * else: - */ - __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":18 - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: - * v_cur = max_neg_val # <<<<<<<<<<<<<< - * else: - * v_cur = value[y-1, x] - */ - __pyx_v_v_cur = __pyx_v_max_neg_val; - - /* "monotonic_align/core.pyx":17 - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: # <<<<<<<<<<<<<< - * v_cur = 
max_neg_val - * else: - */ - goto __pyx_L7; - } - - /* "monotonic_align/core.pyx":20 - * v_cur = max_neg_val - * else: - * v_cur = value[y-1, x] # <<<<<<<<<<<<<< - * if x == 0: - * if y == 0: - */ - /*else*/ { - __pyx_t_9 = (__pyx_v_y - 1); - __pyx_t_10 = __pyx_v_x; - __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); - } - __pyx_L7:; - - /* "monotonic_align/core.pyx":21 - * else: - * v_cur = value[y-1, x] - * if x == 0: # <<<<<<<<<<<<<< - * if y == 0: - * v_prev = 0. - */ - __pyx_t_8 = ((__pyx_v_x == 0) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":22 - * v_cur = value[y-1, x] - * if x == 0: - * if y == 0: # <<<<<<<<<<<<<< - * v_prev = 0. - * else: - */ - __pyx_t_8 = ((__pyx_v_y == 0) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":23 - * if x == 0: - * if y == 0: - * v_prev = 0. # <<<<<<<<<<<<<< - * else: - * v_prev = max_neg_val - */ - __pyx_v_v_prev = 0.; - - /* "monotonic_align/core.pyx":22 - * v_cur = value[y-1, x] - * if x == 0: - * if y == 0: # <<<<<<<<<<<<<< - * v_prev = 0. - * else: - */ - goto __pyx_L9; - } - - /* "monotonic_align/core.pyx":25 - * v_prev = 0. - * else: - * v_prev = max_neg_val # <<<<<<<<<<<<<< - * else: - * v_prev = value[y-1, x-1] - */ - /*else*/ { - __pyx_v_v_prev = __pyx_v_max_neg_val; - } - __pyx_L9:; - - /* "monotonic_align/core.pyx":21 - * else: - * v_cur = value[y-1, x] - * if x == 0: # <<<<<<<<<<<<<< - * if y == 0: - * v_prev = 0. - */ - goto __pyx_L8; - } - - /* "monotonic_align/core.pyx":27 - * v_prev = max_neg_val - * else: - * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< - * value[y, x] += max(v_prev, v_cur) - * - */ - /*else*/ { - __pyx_t_10 = (__pyx_v_y - 1); - __pyx_t_9 = (__pyx_v_x - 1); - __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); - } - __pyx_L8:; - - /* "monotonic_align/core.pyx":28 - * else: - * v_prev = value[y-1, x-1] - * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< - * - * for y in range(t_y - 1, -1, -1): - */ - __pyx_t_11 = __pyx_v_v_cur; - __pyx_t_12 = __pyx_v_v_prev; - if (((__pyx_t_11 > __pyx_t_12) != 0)) { - __pyx_t_13 = __pyx_t_11; - } else { - __pyx_t_13 = __pyx_t_12; - } - __pyx_t_9 = __pyx_v_y; - __pyx_t_10 = __pyx_v_x; - *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; - } - } - - /* "monotonic_align/core.pyx":30 - * value[y, x] += max(v_prev, v_cur) - * - * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - */ - for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_y = __pyx_t_1; - - /* "monotonic_align/core.pyx":31 - * - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 # <<<<<<<<<<<<<< - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - * index = index - 1 - */ - __pyx_t_10 = __pyx_v_y; - __pyx_t_9 = __pyx_v_index; - *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; - - /* "monotonic_align/core.pyx":32 - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< - * index = index - 1 - * - */ - __pyx_t_14 = 
((__pyx_v_index != 0) != 0); - if (__pyx_t_14) { - } else { - __pyx_t_8 = __pyx_t_14; - goto __pyx_L13_bool_binop_done; - } - __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); - if (!__pyx_t_14) { - } else { - __pyx_t_8 = __pyx_t_14; - goto __pyx_L13_bool_binop_done; - } - __pyx_t_9 = (__pyx_v_y - 1); - __pyx_t_10 = __pyx_v_index; - __pyx_t_15 = (__pyx_v_y - 1); - __pyx_t_16 = (__pyx_v_index - 1); - __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); - __pyx_t_8 = __pyx_t_14; - __pyx_L13_bool_binop_done:; - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":33 - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - * index = index - 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_index = (__pyx_v_index - 1); - - /* "monotonic_align/core.pyx":32 - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< - * index = index - 1 - * - */ - } - } - - /* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - - /* function exit code */ -} - -/* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { - CYTHON_UNUSED int __pyx_v_b; - int __pyx_v_i; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; - Py_ssize_t __pyx_t_6; - Py_ssize_t __pyx_t_7; - - /* "monotonic_align/core.pyx":39 - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: - * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< - * cdef int i - * for i in prange(b, nogil=True): - */ - __pyx_v_b = (__pyx_v_paths.shape[0]); - - /* "monotonic_align/core.pyx":41 - * cdef int b = paths.shape[0] - * cdef int i - * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) - */ - { - #ifdef WITH_THREAD - PyThreadState *_save; - Py_UNBLOCK_THREADS - __Pyx_FastGIL_Remember(); - #endif - /*try:*/ { - __pyx_t_1 = __pyx_v_b; - if ((1 == 0)) abort(); - { - #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) - #undef likely - #undef unlikely - #define likely(x) (x) - #define unlikely(x) (x) - #endif - __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; - if (__pyx_t_3 > 0) - { - #ifdef _OPENMP - #pragma omp parallel private(__pyx_t_6, 
__pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) - #endif /* _OPENMP */ - { - #ifdef _OPENMP - #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) - #endif /* _OPENMP */ - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ - { - __pyx_v_i = (int)(0 + 1 * __pyx_t_2); - - /* "monotonic_align/core.pyx":42 - * cdef int i - * for i in prange(b, nogil=True): - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< - */ - __pyx_t_4.data = __pyx_v_paths.data; - __pyx_t_4.memview = __pyx_v_paths.memview; - __PYX_INC_MEMVIEW(&__pyx_t_4, 0); - { - Py_ssize_t __pyx_tmp_idx = __pyx_v_i; - Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; - __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; -} - -__pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; -__pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; - __pyx_t_4.suboffsets[0] = -1; - -__pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; -__pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; - __pyx_t_4.suboffsets[1] = -1; - -__pyx_t_5.data = __pyx_v_values.data; - __pyx_t_5.memview = __pyx_v_values.memview; - __PYX_INC_MEMVIEW(&__pyx_t_5, 0); - { - Py_ssize_t __pyx_tmp_idx = __pyx_v_i; - Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; - __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; -} - -__pyx_t_5.shape[0] = __pyx_v_values.shape[1]; -__pyx_t_5.strides[0] = __pyx_v_values.strides[1]; - __pyx_t_5.suboffsets[0] = -1; - -__pyx_t_5.shape[1] = __pyx_v_values.shape[2]; -__pyx_t_5.strides[1] = __pyx_v_values.strides[2]; - __pyx_t_5.suboffsets[1] = -1; - -__pyx_t_6 = __pyx_v_i; - __pyx_t_7 = __pyx_v_i; - __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); - __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); - __pyx_t_4.memview = NULL; - __pyx_t_4.data = NULL; - __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); - __pyx_t_5.memview = NULL; - __pyx_t_5.data = NULL; - } - } - } - } - } - #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) - #undef likely - #undef unlikely - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #endif - } - - /* "monotonic_align/core.pyx":41 - * cdef int b = paths.shape[0] - * cdef int i - * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) - */ - /*finally:*/ { - /*normal exit:*/{ - #ifdef WITH_THREAD - __Pyx_FastGIL_Forget(); - Py_BLOCK_THREADS - #endif - goto __pyx_L5; - } - __pyx_L5:; - } - } - - /* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - - /* function exit code */ -} - -/* Python wrapper */ -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; - 
int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; - PyObject* values[4] = {0,0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - } - __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject 
*__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("maximum_path_c", 0); - __Pyx_XDECREF(__pyx_r); - if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } - __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - -/* Python wrapper */ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_shape = 0; - Py_ssize_t __pyx_v_itemsize; - PyObject *__pyx_v_format = 0; - PyObject *__pyx_v_mode = 0; - int __pyx_v_allocate_buffer; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; - PyObject* values[5] = {0,0,0,0,0}; - values[3] = ((PyObject *)__pyx_n_s_c); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); - if (value) { values[3] = value; kw_args--; } - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_shape = ((PyObject*)values[0]); - __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) - __pyx_v_format = values[2]; - __pyx_v_mode = values[3]; - if (values[4]) { - __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) - } else { - - /* "View.MemoryView":123 - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, - * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< - * - * cdef int idx - */ - __pyx_v_allocate_buffer = ((int)1); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) - if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { - PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) - } - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); - - /* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject 
*__pyx_v_mode, int __pyx_v_allocate_buffer) { - int __pyx_v_idx; - Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_dim; - PyObject **__pyx_v_p; - char __pyx_v_order; - int __pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - char *__pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - PyObject *__pyx_t_10 = NULL; - Py_ssize_t __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - __Pyx_INCREF(__pyx_v_format); - - /* "View.MemoryView":129 - * cdef PyObject **p - * - * self.ndim = len(shape) # <<<<<<<<<<<<<< - * self.itemsize = itemsize - * - */ - if (unlikely(__pyx_v_shape == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 129, __pyx_L1_error) - } - __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) - __pyx_v_self->ndim = ((int)__pyx_t_1); - - /* "View.MemoryView":130 - * - * self.ndim = len(shape) - * self.itemsize = itemsize # <<<<<<<<<<<<<< - * - * if not self.ndim: - */ - __pyx_v_self->itemsize = __pyx_v_itemsize; - - /* "View.MemoryView":132 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError("Empty shape tuple for cython.array") - * - */ - __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":133 - * - * if not self.ndim: - * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< - * - * if itemsize <= 0: - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 133, __pyx_L1_error) - - /* "View.MemoryView":132 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError("Empty shape tuple for cython.array") - * - */ - } - - /* "View.MemoryView":135 - * raise ValueError("Empty shape tuple for cython.array") - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError("itemsize <= 0 for cython.array") - * - */ - __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":136 - * - * if itemsize <= 0: - * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 136, __pyx_L1_error) - - /* "View.MemoryView":135 - * raise ValueError("Empty shape tuple for cython.array") - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError("itemsize <= 0 for cython.array") - * - */ - } - - /* "View.MemoryView":138 - * raise ValueError("itemsize <= 0 for cython.array") - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - __pyx_t_2 = PyBytes_Check(__pyx_v_format); - __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":139 - * - * if not isinstance(format, bytes): - * format = format.encode('ASCII') # <<<<<<<<<<<<<< - 
* self._format = format # keep a reference to the byte string - * self.format = self._format - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - } - } - __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":138 - * raise ValueError("itemsize <= 0 for cython.array") - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - } - - /* "View.MemoryView":140 - * if not isinstance(format, bytes): - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< - * self.format = self._format - * - */ - if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) - __pyx_t_3 = __pyx_v_format; - __Pyx_INCREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __Pyx_GOTREF(__pyx_v_self->_format); - __Pyx_DECREF(__pyx_v_self->_format); - __pyx_v_self->_format = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":141 - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - * self.format = self._format # <<<<<<<<<<<<<< - * - * - */ - if (unlikely(__pyx_v_self->_format == Py_None)) { - PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); - __PYX_ERR(1, 141, __pyx_L1_error) - } - __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) - __pyx_v_self->format = __pyx_t_7; - - /* "View.MemoryView":144 - * - * - * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< - * self._strides = self._shape + self.ndim - * - */ - __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); - - /* "View.MemoryView":145 - * - * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) - * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< - * - * if not self._shape: - */ - __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); - - /* "View.MemoryView":147 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate shape and strides.") - * - */ - __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":148 - * - * if not self._shape: - * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 148, __pyx_L1_error) - - /* "View.MemoryView":147 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate shape and strides.") - * - */ - } - - /* "View.MemoryView":151 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - */ - __pyx_t_8 = 0; - __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; - for (;;) { - if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_9; - __pyx_v_idx = __pyx_t_8; - __pyx_t_8 = (__pyx_t_8 + 1); - - /* "View.MemoryView":152 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim - */ - __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":153 - * for idx, dim in enumerate(shape): - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< - * self._shape[idx] = dim - * - */ - __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); - __pyx_t_5 = 0; - __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 153, __pyx_L1_error) - - /* "View.MemoryView":152 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim - */ - } - - /* "View.MemoryView":154 - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim # <<<<<<<<<<<<<< - * - * cdef char order - */ - (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; - - /* "View.MemoryView":151 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) - */ - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":157 - * - * cdef char order - * if mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) - if (__pyx_t_4) { - - /* "View.MemoryView":158 - * cdef char order - * if mode == 'fortran': - * order = b'F' # <<<<<<<<<<<<<< - * self.mode = u'fortran' - * elif mode == 'c': - */ - __pyx_v_order = 'F'; - - /* "View.MemoryView":159 - * if mode == 'fortran': - * order = b'F' - * self.mode = u'fortran' # <<<<<<<<<<<<<< - * elif mode == 'c': - * order = b'C' - */ - __Pyx_INCREF(__pyx_n_u_fortran); - __Pyx_GIVEREF(__pyx_n_u_fortran); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_fortran; - - /* "View.MemoryView":157 - * - * cdef char order - * if mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - goto __pyx_L10; - } - - /* "View.MemoryView":160 - * order = b'F' - * self.mode = u'fortran' - * elif mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) - if (likely(__pyx_t_4)) { - - /* "View.MemoryView":161 - * self.mode = u'fortran' - * elif mode == 'c': - * order = b'C' # <<<<<<<<<<<<<< - * self.mode = u'c' - * else: - */ - __pyx_v_order = 'C'; - - /* "View.MemoryView":162 - * elif mode == 'c': - * order = b'C' - * self.mode = u'c' # <<<<<<<<<<<<<< - * else: - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) - */ - __Pyx_INCREF(__pyx_n_u_c); - __Pyx_GIVEREF(__pyx_n_u_c); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_c; - - /* "View.MemoryView":160 - * order = b'F' - * self.mode = u'fortran' - * elif mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - goto __pyx_L10; - } - - /* "View.MemoryView":164 - * self.mode = u'c' - * else: - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< - * - * self.len = fill_contig_strides_array(self._shape, self._strides, - */ - /*else*/ { - __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 164, __pyx_L1_error) - } - __pyx_L10:; - - /* "View.MemoryView":166 - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) - * - * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< - * itemsize, self.ndim, order) - * - */ - __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); - - /* "View.MemoryView":169 - * itemsize, self.ndim, order) - * - * self.free_data = allocate_buffer # <<<<<<<<<<<<<< - * self.dtype_is_object = format == b'O' - * if allocate_buffer: - */ - __pyx_v_self->free_data = __pyx_v_allocate_buffer; - - /* "View.MemoryView":170 - * - * 
self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< - * if allocate_buffer: - * - */ - __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_v_self->dtype_is_object = __pyx_t_4; - - /* "View.MemoryView":171 - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' - * if allocate_buffer: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_4 = (__pyx_v_allocate_buffer != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":174 - * - * - * self.data = malloc(self.len) # <<<<<<<<<<<<<< - * if not self.data: - * raise MemoryError("unable to allocate array data.") - */ - __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); - - /* "View.MemoryView":175 - * - * self.data = malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate array data.") - * - */ - __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":176 - * self.data = malloc(self.len) - * if not self.data: - * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 176, __pyx_L1_error) - - /* "View.MemoryView":175 - * - * self.data = malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate array data.") - * - */ - } - - /* "View.MemoryView":178 - * raise MemoryError("unable to allocate array data.") - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = self.data - * for i in range(self.len / itemsize): - */ - __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":179 - * - * if self.dtype_is_object: - * p = self.data # <<<<<<<<<<<<<< - * for i in range(self.len / itemsize): - * p[i] = Py_None - */ - __pyx_v_p = ((PyObject **)__pyx_v_self->data); - - /* "View.MemoryView":180 - * if self.dtype_is_object: - * p = self.data - * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< - * p[i] = Py_None - * Py_INCREF(Py_None) - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 180, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(1, 180, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); - __pyx_t_9 = __pyx_t_1; - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - /* "View.MemoryView":181 - * p = self.data - * for i in range(self.len / itemsize): - * p[i] = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - (__pyx_v_p[__pyx_v_i]) = Py_None; - - /* "View.MemoryView":182 - * for i in range(self.len / itemsize): - * p[i] = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - 
Py_INCREF(Py_None); - } - - /* "View.MemoryView":178 - * raise MemoryError("unable to allocate array data.") - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = self.data - * for i in range(self.len / itemsize): - */ - } - - /* "View.MemoryView":171 - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' - * if allocate_buffer: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_format); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":185 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * cdef int bufmode = -1 - * if self.mode == u"c": - */ - -/* Python wrapper */ -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_bufmode; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - char *__pyx_t_4; - Py_ssize_t __pyx_t_5; - int __pyx_t_6; - Py_ssize_t *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (__pyx_v_info == NULL) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":186 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 # <<<<<<<<<<<<<< - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = -1; - - /* "View.MemoryView":187 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":188 - * cdef int bufmode = -1 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = 
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":187 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - goto __pyx_L3; - } - - /* "View.MemoryView":189 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) - __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":190 - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") - */ - __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":189 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - } - __pyx_L3:; - - /* "View.MemoryView":191 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - */ - __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":192 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 192, __pyx_L1_error) - - /* "View.MemoryView":191 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - */ - } - - /* "View.MemoryView":193 - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data # <<<<<<<<<<<<<< - * info.len = self.len - * info.ndim = self.ndim - */ - __pyx_t_4 = __pyx_v_self->data; - __pyx_v_info->buf = __pyx_t_4; - - /* "View.MemoryView":194 - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - * info.len = self.len # <<<<<<<<<<<<<< - * info.ndim = self.ndim - * info.shape = self._shape - */ - __pyx_t_5 = __pyx_v_self->len; - __pyx_v_info->len = __pyx_t_5; - - /* "View.MemoryView":195 - * info.buf = self.data - * info.len = self.len - * info.ndim = self.ndim # <<<<<<<<<<<<<< - * info.shape = self._shape - * info.strides = self._strides - */ - __pyx_t_6 = __pyx_v_self->ndim; - __pyx_v_info->ndim = __pyx_t_6; - - /* "View.MemoryView":196 - * info.len = self.len - * info.ndim = self.ndim - * 
info.shape = self._shape # <<<<<<<<<<<<<< - * info.strides = self._strides - * info.suboffsets = NULL - */ - __pyx_t_7 = __pyx_v_self->_shape; - __pyx_v_info->shape = __pyx_t_7; - - /* "View.MemoryView":197 - * info.ndim = self.ndim - * info.shape = self._shape - * info.strides = self._strides # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = self.itemsize - */ - __pyx_t_7 = __pyx_v_self->_strides; - __pyx_v_info->strides = __pyx_t_7; - - /* "View.MemoryView":198 - * info.shape = self._shape - * info.strides = self._strides - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = self.itemsize - * info.readonly = 0 - */ - __pyx_v_info->suboffsets = NULL; - - /* "View.MemoryView":199 - * info.strides = self._strides - * info.suboffsets = NULL - * info.itemsize = self.itemsize # <<<<<<<<<<<<<< - * info.readonly = 0 - * - */ - __pyx_t_5 = __pyx_v_self->itemsize; - __pyx_v_info->itemsize = __pyx_t_5; - - /* "View.MemoryView":200 - * info.suboffsets = NULL - * info.itemsize = self.itemsize - * info.readonly = 0 # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - __pyx_v_info->readonly = 0; - - /* "View.MemoryView":202 - * info.readonly = 0 - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.format - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":203 - * - * if flags & PyBUF_FORMAT: - * info.format = self.format # <<<<<<<<<<<<<< - * else: - * info.format = NULL - */ - __pyx_t_4 = __pyx_v_self->format; - __pyx_v_info->format = __pyx_t_4; - - /* "View.MemoryView":202 - * info.readonly = 0 - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.format - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":205 - * info.format = self.format - * else: - * info.format = NULL # <<<<<<<<<<<<<< - * - * info.obj = self - */ - /*else*/ { - __pyx_v_info->format = NULL; - } - __pyx_L5:; - - /* "View.MemoryView":207 - * info.format = NULL - * - * info.obj = self # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":185 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * cdef int bufmode = -1 - * if self.mode == u"c": - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":211 - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - -/* Python wrapper */ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - 
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":212 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data: - */ - __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":213 - * def __dealloc__(array self): - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) # <<<<<<<<<<<<<< - * elif self.free_data: - * if self.dtype_is_object: - */ - __pyx_v_self->callback_free_data(__pyx_v_self->data); - - /* "View.MemoryView":212 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":214 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, - */ - __pyx_t_1 = (__pyx_v_self->free_data != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":215 - * self.callback_free_data(self.data) - * elif self.free_data: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - */ - __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":216 - * elif self.free_data: - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< - * self._strides, self.ndim, False) - * free(self.data) - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); - - /* "View.MemoryView":215 - * self.callback_free_data(self.data) - * elif self.free_data: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - */ - } - - /* "View.MemoryView":218 - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - * free(self.data) # <<<<<<<<<<<<<< - * PyObject_Free(self._shape) - * - */ - free(__pyx_v_self->data); - - /* "View.MemoryView":214 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, - */ - } - __pyx_L3:; - - /* "View.MemoryView":219 - * self._strides, self.ndim, False) - * free(self.data) - * PyObject_Free(self._shape) # <<<<<<<<<<<<<< - * - * @property - */ - PyObject_Free(__pyx_v_self->_shape); - - /* "View.MemoryView":211 - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":222 - * - * @property - * def memview(self): # <<<<<<<<<<<<<< - * return self.get_memview() - * - */ - -/* Python wrapper */ -static 
PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":223 - * @property - * def memview(self): - * return self.get_memview() # <<<<<<<<<<<<<< - * - * @cname('get_memview') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":222 - * - * @property - * def memview(self): # <<<<<<<<<<<<<< - * return self.get_memview() - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":226 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_memview", 0); - - /* "View.MemoryView":227 - * @cname('get_memview') - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< - * return memoryview(self, flags, self.dtype_is_object) - * - */ - __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); - - /* "View.MemoryView":228 - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - 
__pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":226 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":230 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":231 - * - * def __len__(self): - * return self._shape[0] # <<<<<<<<<<<<<< - * - * def __getattr__(self, attr): - */ - __pyx_r = (__pyx_v_self->_shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":230 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":233 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getattr__", 0); - - /* "View.MemoryView":234 - * - * def __getattr__(self, attr): - * return getattr(self.memview, attr) # <<<<<<<<<<<<<< - * - * def __getitem__(self, item): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":233 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":236 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":237 - * - * def __getitem__(self, item): - * return self.memview[item] # <<<<<<<<<<<<<< - * - * def __setitem__(self, item, value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":236 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":239 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - -/* Python wrapper */ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - 
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - - /* "View.MemoryView":240 - * - * def __setitem__(self, item, value): - * self.memview[item] = value # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":239 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":244 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< - * char *mode, char *buf): - * cdef array result - */ - -static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { - struct __pyx_array_obj *__pyx_v_result = 0; - struct __pyx_array_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("array_cwrapper", 0); - - /* "View.MemoryView":248 - * cdef array result - * - * if buf == NULL: # <<<<<<<<<<<<<< - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - */ - __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":249 - * - * if buf == 
NULL: - * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), - */ - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":248 - * cdef array result - * - * if buf == NULL: # <<<<<<<<<<<<<< - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":251 - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< - * allocate_buffer=False) - * result.data = buf - */ - /*else*/ { - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); - __pyx_t_4 = 0; - __pyx_t_5 = 0; - __pyx_t_3 = 0; - - /* "View.MemoryView":252 - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), - * allocate_buffer=False) # <<<<<<<<<<<<<< - * result.data = buf - * - */ - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) - - /* "View.MemoryView":251 - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< - * allocate_buffer=False) - * result.data = buf - */ - __pyx_t_5 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); - __pyx_t_5 = 0; - - /* "View.MemoryView":253 - * result = array(shape, itemsize, format, mode.decode('ASCII'), - * allocate_buffer=False) - * result.data = buf # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->data = __pyx_v_buf; - } - __pyx_L3:; - - /* "View.MemoryView":255 - * result.data = buf - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":244 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< - * char *mode, char *buf): - * cdef array result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":281 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - -/* Python wrapper */ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_name = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; - PyObject* values[1] = {0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - } - __pyx_v_name = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); - - /* function 
exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__", 0); - - /* "View.MemoryView":282 - * cdef object name - * def __init__(self, name): - * self.name = name # <<<<<<<<<<<<<< - * def __repr__(self): - * return self.name - */ - __Pyx_INCREF(__pyx_v_name); - __Pyx_GIVEREF(__pyx_v_name); - __Pyx_GOTREF(__pyx_v_self->name); - __Pyx_DECREF(__pyx_v_self->name); - __pyx_v_self->name = __pyx_v_name; - - /* "View.MemoryView":281 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - - /* function exit code */ - __pyx_r = 0; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":283 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - -/* Python wrapper */ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":284 - * self.name = name - * def __repr__(self): - * return self.name # <<<<<<<<<<<<<< - * - * cdef generic = Enum("") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->name); - __pyx_r = __pyx_v_self->name; - goto __pyx_L0; - - /* "View.MemoryView":283 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_v_state = 0; - PyObject *__pyx_v__dict = 0; - int __pyx_v_use_setstate; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 
0); - - /* "(tree fragment)":5 - * cdef object _dict - * cdef bint use_setstate - * state = (self.name,) # <<<<<<<<<<<<<< - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_self->name); - __Pyx_GIVEREF(__pyx_v_self->name); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); - __pyx_v_state = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "(tree fragment)":6 - * cdef bint use_setstate - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< - * if _dict is not None: - * state += (_dict,) - */ - __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v__dict = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - __pyx_t_2 = (__pyx_v__dict != Py_None); - __pyx_t_3 = (__pyx_t_2 != 0); - if (__pyx_t_3) { - - /* "(tree fragment)":8 - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - * state += (_dict,) # <<<<<<<<<<<<<< - * use_setstate = True - * else: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v__dict); - __Pyx_GIVEREF(__pyx_v__dict); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); - __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); - __pyx_t_4 = 0; - - /* "(tree fragment)":9 - * if _dict is not None: - * state += (_dict,) - * use_setstate = True # <<<<<<<<<<<<<< - * else: - * use_setstate = self.name is not None - */ - __pyx_v_use_setstate = 1; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - goto __pyx_L3; - } - - /* "(tree fragment)":11 - * use_setstate = True - * else: - * use_setstate = self.name is not None # <<<<<<<<<<<<<< - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - */ - /*else*/ { - __pyx_t_3 = (__pyx_v_self->name != Py_None); - __pyx_v_use_setstate = __pyx_t_3; - } - __pyx_L3:; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - */ - __pyx_t_3 = (__pyx_v_use_setstate != 0); - if (__pyx_t_3) { - - /* "(tree fragment)":13 - * use_setstate = self.name is not None - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - 
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_184977713); - __Pyx_GIVEREF(__pyx_int_184977713); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); - __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); - __pyx_t_4 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - */ - } - - /* "(tree fragment)":15 - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_184977713); - __Pyx_GIVEREF(__pyx_int_184977713); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); - __pyx_t_5 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - } - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_state); - __Pyx_XDECREF(__pyx_v__dict); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ 
(wrapper)", 0); - __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":17 - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) - __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":298 - * - * @cname('__pyx_align_pointer') - * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory - */ - -static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { - Py_intptr_t __pyx_v_aligned_p; - size_t __pyx_v_offset; - void *__pyx_r; - int __pyx_t_1; - - /* "View.MemoryView":300 - * cdef void *align_pointer(void *memory, size_t alignment) nogil: - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory # <<<<<<<<<<<<<< - * cdef size_t offset - * - */ - __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); - - /* "View.MemoryView":304 - * - * with cython.cdivision(True): - * offset = aligned_p % alignment # <<<<<<<<<<<<<< - * - * if offset > 0: - */ - __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); - - /* "View.MemoryView":306 - * offset = aligned_p % alignment - * - * if offset > 0: # <<<<<<<<<<<<<< - * aligned_p += alignment - offset - * - */ - __pyx_t_1 = ((__pyx_v_offset > 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":307 - * - * if offset > 0: - * aligned_p += alignment - offset # <<<<<<<<<<<<<< - * - * return aligned_p - */ - __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); - - /* "View.MemoryView":306 - * offset = aligned_p % alignment - * - * if offset > 0: # <<<<<<<<<<<<<< - * aligned_p += alignment - offset - * - */ - } - - /* "View.MemoryView":309 - * aligned_p += alignment - offset - * - * return aligned_p # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = ((void *)__pyx_v_aligned_p); - goto __pyx_L0; - 
- /* "View.MemoryView":298 - * - * @cname('__pyx_align_pointer') - * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":345 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - -/* Python wrapper */ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_obj = 0; - int __pyx_v_flags; - int __pyx_v_dtype_is_object; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_obj = values[0]; - __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) - if (values[2]) { - __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) - } else { - __pyx_v_dtype_is_object = ((int)0); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - - /* "View.MemoryView":346 - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj # <<<<<<<<<<<<<< - * self.flags = flags - * if type(self) is memoryview or obj is not None: - */ - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - __Pyx_GOTREF(__pyx_v_self->obj); - __Pyx_DECREF(__pyx_v_self->obj); - __pyx_v_self->obj = __pyx_v_obj; - - /* "View.MemoryView":347 - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj - * self.flags = flags # <<<<<<<<<<<<<< - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - */ - __pyx_v_self->flags = __pyx_v_flags; - - /* "View.MemoryView":348 - * self.obj = obj - * self.flags = flags - * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - */ - __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); - __pyx_t_3 = (__pyx_t_2 != 0); - if (!__pyx_t_3) { - } else { - __pyx_t_1 = __pyx_t_3; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_3 = (__pyx_v_obj != Py_None); - __pyx_t_2 = (__pyx_t_3 != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (__pyx_t_1) { - - /* "View.MemoryView":349 - * self.flags = flags - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< - * if self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - */ - __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) - - /* "View.MemoryView":350 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":351 - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; - - /* "View.MemoryView":352 - * if self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * global __pyx_memoryview_thread_locks_used - */ - Py_INCREF(Py_None); - - /* "View.MemoryView":350 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - } - - /* "View.MemoryView":348 - * self.obj = obj - * self.flags = flags - * 
if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if self.view.obj == NULL: - */ - } - - /* "View.MemoryView":355 - * - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":356 - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - */ - __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - - /* "View.MemoryView":357 - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); - - /* "View.MemoryView":355 - * - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - } - - /* "View.MemoryView":358 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":359 - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< - * if self.lock is NULL: - * raise MemoryError - */ - __pyx_v_self->lock = PyThread_allocate_lock(); - - /* "View.MemoryView":360 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":361 - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) - - /* "View.MemoryView":360 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - } - - /* "View.MemoryView":358 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - } - - /* "View.MemoryView":363 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":364 - * - * if flags & PyBUF_FORMAT: - * self.dtype_is_object = (self.view.format[0] == 
b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< - * else: - * self.dtype_is_object = dtype_is_object - */ - __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L11_bool_binop_done:; - __pyx_v_self->dtype_is_object = __pyx_t_1; - - /* "View.MemoryView":363 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - goto __pyx_L10; - } - - /* "View.MemoryView":366 - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< - * - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( - */ - /*else*/ { - __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; - } - __pyx_L10:; - - /* "View.MemoryView":368 - * self.dtype_is_object = dtype_is_object - * - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< - * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) - * self.typeinfo = NULL - */ - __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); - - /* "View.MemoryView":370 - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( - * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) - * self.typeinfo = NULL # <<<<<<<<<<<<<< - * - * def __dealloc__(memoryview self): - */ - __pyx_v_self->typeinfo = NULL; - - /* "View.MemoryView":345 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":372 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - -/* Python wrapper */ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { - int __pyx_v_i; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - PyThread_type_lock __pyx_t_6; - PyThread_type_lock __pyx_t_7; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":373 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - __pyx_t_1 = (__pyx_v_self->obj != Py_None); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* 
"View.MemoryView":374 - * def __dealloc__(memoryview self): - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - */ - __Pyx_ReleaseBuffer((&__pyx_v_self->view)); - - /* "View.MemoryView":373 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":375 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":377 - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< - * Py_DECREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; - - /* "View.MemoryView":378 - * - * (<__pyx_buffer *> &self.view).obj = NULL - * Py_DECREF(Py_None) # <<<<<<<<<<<<<< - * - * cdef int i - */ - Py_DECREF(Py_None); - - /* "View.MemoryView":375 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - } - __pyx_L3:; - - /* "View.MemoryView":382 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":383 - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - */ - __pyx_t_3 = __pyx_memoryview_thread_locks_used; - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { - __pyx_v_i = __pyx_t_5; - - /* "View.MemoryView":384 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":385 - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); - - /* "View.MemoryView":386 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":388 - 
* if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< - * break - * else: - */ - __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); - - /* "View.MemoryView":387 - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break - */ - (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; - (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; - - /* "View.MemoryView":386 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - } - - /* "View.MemoryView":389 - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break # <<<<<<<<<<<<<< - * else: - * PyThread_free_lock(self.lock) - */ - goto __pyx_L6_break; - - /* "View.MemoryView":384 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - } - } - /*else*/ { - - /* "View.MemoryView":391 - * break - * else: - * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - */ - PyThread_free_lock(__pyx_v_self->lock); - } - __pyx_L6_break:; - - /* "View.MemoryView":382 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - } - - /* "View.MemoryView":372 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":393 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf - */ - -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - Py_ssize_t __pyx_v_dim; - char *__pyx_v_itemp; - PyObject *__pyx_v_idx = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t __pyx_t_3; - PyObject *(*__pyx_t_4)(PyObject *); - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - char *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_item_pointer", 0); 
- - /* "View.MemoryView":395 - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< - * - * for dim, idx in enumerate(index): - */ - __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); - - /* "View.MemoryView":397 - * cdef char *itemp = self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - __pyx_t_1 = 0; - if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { - __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; - __pyx_t_4 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_4)) { - if (likely(PyList_CheckExact(__pyx_t_2))) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } else { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } - } else { - __pyx_t_5 = __pyx_t_4(__pyx_t_2); - if (unlikely(!__pyx_t_5)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 397, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_1; - __pyx_t_1 = (__pyx_t_1 + 1); - - /* "View.MemoryView":398 - * - * for dim, idx in enumerate(index): - * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< - * - * return itemp - */ - __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) - __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_7; - - /* "View.MemoryView":397 - * cdef char *itemp = self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":400 - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - * return itemp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_itemp; - goto __pyx_L0; - - /* "View.MemoryView":393 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf - */ - - /* function exit code */ - 
__pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":403 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_indices = NULL; - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - char *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":404 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":405 - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: - * return self # <<<<<<<<<<<<<< - * - * have_slices, indices = _unellipsify(index, self.view.ndim) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __pyx_r = ((PyObject *)__pyx_v_self); - goto __pyx_L0; - - /* "View.MemoryView":404 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - } - - /* "View.MemoryView":407 - * return self - * - * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * cdef char *itemp - */ - __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (likely(__pyx_t_3 != Py_None)) { - PyObject* sequence = __pyx_t_3; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(1, 407, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - #else - __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } else { - 
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_v_indices = __pyx_t_5; - __pyx_t_5 = 0; - - /* "View.MemoryView":410 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) - if (__pyx_t_2) { - - /* "View.MemoryView":411 - * cdef char *itemp - * if have_slices: - * return memview_slice(self, indices) # <<<<<<<<<<<<<< - * else: - * itemp = self.get_item_pointer(indices) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":410 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - } - - /* "View.MemoryView":413 - * return memview_slice(self, indices) - * else: - * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< - * return self.convert_item_to_object(itemp) - * - */ - /*else*/ { - __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_6; - - /* "View.MemoryView":414 - * else: - * itemp = self.get_item_pointer(indices) - * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< - * - * def __setitem__(memoryview self, object index, object value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":403 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_indices); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":416 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") - */ - -/* Python wrapper */ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_obj = NULL; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - __Pyx_INCREF(__pyx_v_index); - - /* "View.MemoryView":417 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError("Cannot assign to read-only memoryview") - * - */ - __pyx_t_1 = (__pyx_v_self->view.readonly != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":418 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(1, 418, __pyx_L1_error) - - /* "View.MemoryView":417 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError("Cannot assign to read-only memoryview") - * - */ - } - - /* "View.MemoryView":420 - * raise TypeError("Cannot assign to read-only memoryview") - * - * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * if have_slices: - */ - __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (likely(__pyx_t_2 != Py_None)) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(1, 420, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - #else - __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_3; - __pyx_t_3 = 0; - __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":422 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":423 - * - * if have_slices: - * obj = self.is_slice(value) # <<<<<<<<<<<<<< - * if obj: - * self.setitem_slice_assignment(self[index], obj) - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_obj = __pyx_t_2; - __pyx_t_2 = 0; - - /* "View.MemoryView":424 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":425 - * obj = self.is_slice(value) - * if obj: - * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< - * else: - * self.setitem_slice_assign_scalar(self[index], value) - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "View.MemoryView":424 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":427 - * self.setitem_slice_assignment(self[index], obj) - * else: - * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< - * else: - * self.setitem_indexed(index, value) - */ - /*else*/ { - __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L5:; - - /* "View.MemoryView":422 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":429 - * self.setitem_slice_assign_scalar(self[index], value) - * else: - * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< - * - * cdef is_slice(self, obj): - */ - /*else*/ { - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L4:; - - /* "View.MemoryView":416 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; 
- __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":431 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_slice", 0); - __Pyx_INCREF(__pyx_v_obj); - - /* "View.MemoryView":432 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_5); - /*try:*/ { - - /* "View.MemoryView":434 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":435 - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) # <<<<<<<<<<<<<< - * except TypeError: - * return None - */ - __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - - /* "View.MemoryView":434 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); - __pyx_t_6 = 0; - __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | 
PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - } - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - goto __pyx_L9_try_end; - __pyx_L4_error:; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - - /* "View.MemoryView":436 - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - * except TypeError: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); - if (__pyx_t_9) { - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":437 - * self.dtype_is_object) - * except TypeError: - * return None # <<<<<<<<<<<<<< - * - * return obj - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L7_except_return; - } - goto __pyx_L6_except_error; - __pyx_L6_except_error:; - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L1_error; - __pyx_L7_except_return:; - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L0; - __pyx_L9_try_end:; - } - - /* "View.MemoryView":432 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - } - - /* "View.MemoryView":439 - * return None - * - * return obj # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assignment(self, dst, src): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_obj); - __pyx_r = __pyx_v_obj; - goto __pyx_L0; - - /* "View.MemoryView":431 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":441 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { - __Pyx_memviewslice __pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_src_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - __Pyx_memviewslice *__pyx_t_2; - PyObject *__pyx_t_3 
= NULL; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); - - /* "View.MemoryView":445 - * cdef __Pyx_memviewslice src_slice - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) - */ - if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) - - /* "View.MemoryView":446 - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], - * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< - * src.ndim, dst.ndim, self.dtype_is_object) - * - */ - if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) - __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) - - /* "View.MemoryView":447 - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":445 - * cdef __Pyx_memviewslice src_slice - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) - */ - __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) - - /* "View.MemoryView":441 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":449 - * src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # 
<<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { - int __pyx_v_array[0x80]; - void *__pyx_v_tmp; - void *__pyx_v_item; - __Pyx_memviewslice *__pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_tmp_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - char const *__pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); - - /* "View.MemoryView":451 - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - * cdef int array[128] - * cdef void *tmp = NULL # <<<<<<<<<<<<<< - * cdef void *item - * - */ - __pyx_v_tmp = NULL; - - /* "View.MemoryView":456 - * cdef __Pyx_memviewslice *dst_slice - * cdef __Pyx_memviewslice tmp_slice - * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< - * - * if self.view.itemsize > sizeof(array): - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) - __pyx_v_dst_slice = __pyx_t_1; - - /* "View.MemoryView":458 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":459 - * - * if self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< - * if tmp == NULL: - * raise MemoryError - */ - __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); - - /* "View.MemoryView":460 - * if self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":461 - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * item = tmp - * else: - */ - PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) - - /* "View.MemoryView":460 - * if self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - } - - /* "View.MemoryView":462 - * if tmp == NULL: - * raise MemoryError - * item = tmp # <<<<<<<<<<<<<< - * else: - * item = array - */ - __pyx_v_item = __pyx_v_tmp; - - /* "View.MemoryView":458 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":464 - * item = tmp - * else: - * item = array # <<<<<<<<<<<<<< - * - * try: - */ - /*else*/ { - __pyx_v_item = ((void *)__pyx_v_array); - } - __pyx_L3:; - - /* "View.MemoryView":466 - * item = array - * - * try: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * ( item)[0] = value - */ - 
/*try:*/ { - - /* "View.MemoryView":467 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * ( item)[0] = value - * else: - */ - __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":468 - * try: - * if self.dtype_is_object: - * ( item)[0] = value # <<<<<<<<<<<<<< - * else: - * self.assign_item_from_object( item, value) - */ - (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); - - /* "View.MemoryView":467 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * ( item)[0] = value - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":470 - * ( item)[0] = value - * else: - * self.assign_item_from_object( item, value) # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L8:; - - /* "View.MemoryView":474 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":475 - * - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - * item, self.dtype_is_object) - */ - __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":474 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - } - - /* "View.MemoryView":476 - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< - * item, self.dtype_is_object) - * finally: - */ - __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); - } - - /* "View.MemoryView":479 - * item, self.dtype_is_object) - * finally: - * PyMem_Free(tmp) # <<<<<<<<<<<<<< - * - * cdef setitem_indexed(self, index, value): - */ - /*finally:*/ { - /*normal exit:*/{ - PyMem_Free(__pyx_v_tmp); - goto __pyx_L7; - } - __pyx_L6_error:; - /*exception exit:*/{ - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); - if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_7); - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_10); - __Pyx_XGOTREF(__pyx_t_11); - __Pyx_XGOTREF(__pyx_t_12); - __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; - { - PyMem_Free(__pyx_v_tmp); - } - if 
(PY_MAJOR_VERSION >= 3) { - __Pyx_XGIVEREF(__pyx_t_10); - __Pyx_XGIVEREF(__pyx_t_11); - __Pyx_XGIVEREF(__pyx_t_12); - __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); - } - __Pyx_XGIVEREF(__pyx_t_7); - __Pyx_XGIVEREF(__pyx_t_8); - __Pyx_XGIVEREF(__pyx_t_9); - __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); - __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; - goto __pyx_L1_error; - } - __pyx_L7:; - } - - /* "View.MemoryView":449 - * src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":481 - * PyMem_Free(tmp) - * - * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) - */ - -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - char *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_indexed", 0); - - /* "View.MemoryView":482 - * - * cdef setitem_indexed(self, index, value): - * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< - * self.assign_item_from_object(itemp, value) - * - */ - __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_1; - - /* "View.MemoryView":483 - * cdef setitem_indexed(self, index, value): - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":481 - * PyMem_Free(tmp) - * - * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< - * cdef char *itemp = self.get_item_pointer(index) - * self.assign_item_from_object(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":485 - * self.assign_item_from_object(itemp, value) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to 
convert the type""" - */ - -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_v_struct = NULL; - PyObject *__pyx_v_bytesitem = 0; - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - size_t __pyx_t_10; - int __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":488 - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - * import struct # <<<<<<<<<<<<<< - * cdef bytes bytesitem - * - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_struct = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":491 - * cdef bytes bytesitem - * - * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< - * try: - * result = struct.unpack(self.view.format, bytesitem) - */ - __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_2); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - /*try:*/ { - - /* "View.MemoryView":493 - * bytesitem = itemp[:self.view.itemsize] - * try: - * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< - * except struct.error: - * raise ValueError("Unable to convert item to object") - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_8 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_5)) { - PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; - __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { - PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; - __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } else - #endif - { - __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_9); - if (__pyx_t_7) { - __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; - } - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); - __Pyx_INCREF(__pyx_v_bytesitem); - __Pyx_GIVEREF(__pyx_v_bytesitem); - PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); - __pyx_t_6 = 0; - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_result = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - } - - /* "View.MemoryView":497 - * raise ValueError("Unable to convert item to object") - * else: - * if len(self.view.format) == 1: # <<<<<<<<<<<<<< - * return result[0] - * return result - */ - /*else:*/ { - __pyx_t_10 = strlen(__pyx_v_self->view.format); - __pyx_t_11 = ((__pyx_t_10 == 1) != 0); - if (__pyx_t_11) { - - /* "View.MemoryView":498 - * else: - * if len(self.view.format) == 1: - * return result[0] # <<<<<<<<<<<<<< - * return result - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L6_except_return; - - /* "View.MemoryView":497 - * raise ValueError("Unable to convert item to object") - * else: - * if len(self.view.format) == 1: # <<<<<<<<<<<<<< - * return result[0] - * return result - */ - } - - /* "View.MemoryView":499 - * if len(self.view.format) == 1: - * return result[0] - * return result # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_result); - __pyx_r = __pyx_v_result; - goto __pyx_L6_except_return; - } - __pyx_L3_error:; - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - - /* "View.MemoryView":494 - * try: - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: # <<<<<<<<<<<<<< - * raise ValueError("Unable to convert item to object") - * else: - */ - __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); - __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; - if (__pyx_t_8) { - __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_9); - 
__Pyx_GOTREF(__pyx_t_5); - __Pyx_GOTREF(__pyx_t_1); - - /* "View.MemoryView":495 - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< - * else: - * if len(self.view.format) == 1: - */ - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_Raise(__pyx_t_6, 0, 0, 0); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __PYX_ERR(1, 495, __pyx_L5_except_error) - } - goto __pyx_L5_except_error; - __pyx_L5_except_error:; - - /* "View.MemoryView":492 - * - * bytesitem = itemp[:self.view.itemsize] - * try: # <<<<<<<<<<<<<< - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - */ - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); - goto __pyx_L1_error; - __pyx_L6_except_return:; - __Pyx_XGIVEREF(__pyx_t_2); - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); - goto __pyx_L0; - } - - /* "View.MemoryView":485 - * self.assign_item_from_object(itemp, value) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_struct); - __Pyx_XDECREF(__pyx_v_bytesitem); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":501 - * return result - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_v_struct = NULL; - char __pyx_v_c; - PyObject *__pyx_v_bytesvalue = 0; - Py_ssize_t __pyx_v_i; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - PyObject *__pyx_t_8 = NULL; - Py_ssize_t __pyx_t_9; - PyObject *__pyx_t_10 = NULL; - char *__pyx_t_11; - char *__pyx_t_12; - char *__pyx_t_13; - char *__pyx_t_14; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":504 - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - * import struct # <<<<<<<<<<<<<< - * cdef char c - * cdef bytes bytesvalue - */ - __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v_struct = __pyx_t_1; - __pyx_t_1 = 0; - - /* "View.MemoryView":509 - * cdef Py_ssize_t i - * - * if isinstance(value, tuple): # <<<<<<<<<<<<<< - * 
bytesvalue = struct.pack(self.view.format, *value) - * else: - */ - __pyx_t_2 = PyTuple_Check(__pyx_v_value); - __pyx_t_3 = (__pyx_t_2 != 0); - if (__pyx_t_3) { - - /* "View.MemoryView":510 - * - * if isinstance(value, tuple): - * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< - * else: - * bytesvalue = struct.pack(self.view.format, value) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) - __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":509 - * cdef Py_ssize_t i - * - * if isinstance(value, tuple): # <<<<<<<<<<<<<< - * bytesvalue = struct.pack(self.view.format, *value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":512 - * bytesvalue = struct.pack(self.view.format, *value) - * else: - * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< - * - * for i, c in enumerate(bytesvalue): - */ - /*else*/ { - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_7 = 1; - } - } - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(__pyx_t_6)) { - PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; - __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { - PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; - __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) - 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else - #endif - { - __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - if (__pyx_t_5) { - __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; - } - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); - __Pyx_INCREF(__pyx_v_value); - __Pyx_GIVEREF(__pyx_v_value); - PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); - __pyx_t_1 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) - __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); - __pyx_t_4 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":514 - * bytesvalue = struct.pack(self.view.format, value) - * - * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< - * itemp[i] = c - * - */ - __pyx_t_9 = 0; - if (unlikely(__pyx_v_bytesvalue == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); - __PYX_ERR(1, 514, __pyx_L1_error) - } - __Pyx_INCREF(__pyx_v_bytesvalue); - __pyx_t_10 = __pyx_v_bytesvalue; - __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); - __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); - for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { - __pyx_t_11 = __pyx_t_14; - __pyx_v_c = (__pyx_t_11[0]); - - /* "View.MemoryView":515 - * - * for i, c in enumerate(bytesvalue): - * itemp[i] = c # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - __pyx_v_i = __pyx_t_9; - - /* "View.MemoryView":514 - * bytesvalue = struct.pack(self.view.format, value) - * - * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< - * itemp[i] = c - * - */ - __pyx_t_9 = (__pyx_t_9 + 1); - - /* "View.MemoryView":515 - * - * for i, c in enumerate(bytesvalue): - * itemp[i] = c # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; - } - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - - /* "View.MemoryView":501 - * return result - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * """Only used if instantiated manually by the user, or if Cython doesn't - * know how to convert the type""" - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_struct); - __Pyx_XDECREF(__pyx_v_bytesvalue); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":518 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") - */ - -/* Python wrapper */ -static CYTHON_UNUSED int 
__pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t *__pyx_t_4; - char *__pyx_t_5; - void *__pyx_t_6; - int __pyx_t_7; - Py_ssize_t __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (__pyx_v_info == NULL) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":519 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - */ - __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_2 = (__pyx_v_self->view.readonly != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":520 - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< - * - * if flags & PyBUF_ND: - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 520, __pyx_L1_error) - - /* "View.MemoryView":519 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - */ - } - - /* "View.MemoryView":522 - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - * if flags & PyBUF_ND: # <<<<<<<<<<<<<< - * info.shape = self.view.shape - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":523 - * - * if flags & PyBUF_ND: - * info.shape = self.view.shape # <<<<<<<<<<<<<< - * else: - * info.shape = NULL - */ - __pyx_t_4 = __pyx_v_self->view.shape; - __pyx_v_info->shape = __pyx_t_4; - - /* "View.MemoryView":522 - * raise ValueError("Cannot create writable memory view from read-only memoryview") - * - * if flags & PyBUF_ND: # <<<<<<<<<<<<<< - * info.shape = self.view.shape - * else: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":525 - * info.shape = self.view.shape - * else: - * 
info.shape = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_STRIDES: - */ - /*else*/ { - __pyx_v_info->shape = NULL; - } - __pyx_L6:; - - /* "View.MemoryView":527 - * info.shape = NULL - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.strides = self.view.strides - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":528 - * - * if flags & PyBUF_STRIDES: - * info.strides = self.view.strides # <<<<<<<<<<<<<< - * else: - * info.strides = NULL - */ - __pyx_t_4 = __pyx_v_self->view.strides; - __pyx_v_info->strides = __pyx_t_4; - - /* "View.MemoryView":527 - * info.shape = NULL - * - * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< - * info.strides = self.view.strides - * else: - */ - goto __pyx_L7; - } - - /* "View.MemoryView":530 - * info.strides = self.view.strides - * else: - * info.strides = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_INDIRECT: - */ - /*else*/ { - __pyx_v_info->strides = NULL; - } - __pyx_L7:; - - /* "View.MemoryView":532 - * info.strides = NULL - * - * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< - * info.suboffsets = self.view.suboffsets - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":533 - * - * if flags & PyBUF_INDIRECT: - * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< - * else: - * info.suboffsets = NULL - */ - __pyx_t_4 = __pyx_v_self->view.suboffsets; - __pyx_v_info->suboffsets = __pyx_t_4; - - /* "View.MemoryView":532 - * info.strides = NULL - * - * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< - * info.suboffsets = self.view.suboffsets - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":535 - * info.suboffsets = self.view.suboffsets - * else: - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - /*else*/ { - __pyx_v_info->suboffsets = NULL; - } - __pyx_L8:; - - /* "View.MemoryView":537 - * info.suboffsets = NULL - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.view.format - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":538 - * - * if flags & PyBUF_FORMAT: - * info.format = self.view.format # <<<<<<<<<<<<<< - * else: - * info.format = NULL - */ - __pyx_t_5 = __pyx_v_self->view.format; - __pyx_v_info->format = __pyx_t_5; - - /* "View.MemoryView":537 - * info.suboffsets = NULL - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.view.format - * else: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":540 - * info.format = self.view.format - * else: - * info.format = NULL # <<<<<<<<<<<<<< - * - * info.buf = self.view.buf - */ - /*else*/ { - __pyx_v_info->format = NULL; - } - __pyx_L9:; - - /* "View.MemoryView":542 - * info.format = NULL - * - * info.buf = self.view.buf # <<<<<<<<<<<<<< - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize - */ - __pyx_t_6 = __pyx_v_self->view.buf; - __pyx_v_info->buf = __pyx_t_6; - - /* "View.MemoryView":543 - * - * info.buf = self.view.buf - * info.ndim = self.view.ndim # <<<<<<<<<<<<<< - * info.itemsize = self.view.itemsize - * info.len = self.view.len - */ - __pyx_t_7 = __pyx_v_self->view.ndim; - __pyx_v_info->ndim = __pyx_t_7; - - /* "View.MemoryView":544 - * info.buf = self.view.buf - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< - * info.len = self.view.len - * info.readonly = self.view.readonly - */ - __pyx_t_8 = __pyx_v_self->view.itemsize; - __pyx_v_info->itemsize = __pyx_t_8; - - /* 
"View.MemoryView":545 - * info.ndim = self.view.ndim - * info.itemsize = self.view.itemsize - * info.len = self.view.len # <<<<<<<<<<<<<< - * info.readonly = self.view.readonly - * info.obj = self - */ - __pyx_t_8 = __pyx_v_self->view.len; - __pyx_v_info->len = __pyx_t_8; - - /* "View.MemoryView":546 - * info.itemsize = self.view.itemsize - * info.len = self.view.len - * info.readonly = self.view.readonly # <<<<<<<<<<<<<< - * info.obj = self - * - */ - __pyx_t_1 = __pyx_v_self->view.readonly; - __pyx_v_info->readonly = __pyx_t_1; - - /* "View.MemoryView":547 - * info.len = self.view.len - * info.readonly = self.view.readonly - * info.obj = self # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":518 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":553 - * - * @property - * def T(self): # <<<<<<<<<<<<<< - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":554 - * @property - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< - * transpose_memslice(&result.from_slice) - * return result - */ - __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) - __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); - __pyx_t_1 
= 0; - - /* "View.MemoryView":555 - * def T(self): - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) - - /* "View.MemoryView":556 - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - * return result # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":553 - * - * @property - * def T(self): # <<<<<<<<<<<<<< - * cdef _memoryviewslice result = memoryview_copy(self) - * transpose_memslice(&result.from_slice) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":559 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.obj - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":560 - * @property - * def base(self): - * return self.obj # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->obj); - __pyx_r = __pyx_v_self->obj; - goto __pyx_L0; - - /* "View.MemoryView":559 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.obj - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":563 - * - * @property - * def shape(self): # <<<<<<<<<<<<<< - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_length; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t 
*__pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":564 - * @property - * def shape(self): - * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); - for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_v_length = (__pyx_t_2[0]); - __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "View.MemoryView":563 - * - * @property - * def shape(self): # <<<<<<<<<<<<<< - * return tuple([length for length in self.view.shape[:self.view.ndim]]) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":567 - * - * @property - * def strides(self): # <<<<<<<<<<<<<< - * if self.view.strides == NULL: - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_stride; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":568 - * @property - * def strides(self): - * if self.view.strides == NULL: # <<<<<<<<<<<<<< - * - * raise ValueError("Buffer view does not expose strides") - */ - __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":570 - * if self.view.strides == NULL: - * - * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(1, 570, __pyx_L1_error) - - /* "View.MemoryView":568 - * @property - * def strides(self): - * if self.view.strides == NULL: # <<<<<<<<<<<<<< - * - * raise ValueError("Buffer view does not expose strides") - */ - } - - /* "View.MemoryView":572 - * raise ValueError("Buffer view does not expose strides") - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); - for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { - __pyx_t_3 = __pyx_t_5; - __pyx_v_stride = (__pyx_t_3[0]); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_6; - __pyx_t_6 = 0; - goto __pyx_L0; - - /* "View.MemoryView":567 - * - * @property - * def strides(self): # <<<<<<<<<<<<<< - * if self.view.strides == NULL: - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":575 - * - * @property - * def suboffsets(self): # <<<<<<<<<<<<<< - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - Py_ssize_t *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":576 - * @property - * def suboffsets(self): - * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< - * return (-1,) * self.view.ndim - * - */ - __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":577 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in 
self.view.suboffsets[:self.view.ndim]]) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":576 - * @property - * def suboffsets(self): - * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< - * return (-1,) * self.view.ndim - * - */ - } - - /* "View.MemoryView":579 - * return (-1,) * self.view.ndim - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); - for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { - __pyx_t_4 = __pyx_t_6; - __pyx_v_suboffset = (__pyx_t_4[0]); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":575 - * - * @property - * def suboffsets(self): # <<<<<<<<<<<<<< - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":582 - * - * @property - * def ndim(self): # <<<<<<<<<<<<<< - * return self.view.ndim - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":583 - * @property - * def ndim(self): - * return self.view.ndim # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; 
- __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":582 - * - * @property - * def ndim(self): # <<<<<<<<<<<<<< - * return self.view.ndim - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":586 - * - * @property - * def itemsize(self): # <<<<<<<<<<<<<< - * return self.view.itemsize - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":587 - * @property - * def itemsize(self): - * return self.view.itemsize # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":586 - * - * @property - * def itemsize(self): # <<<<<<<<<<<<<< - * return self.view.itemsize - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":590 - * - * @property - * def nbytes(self): # <<<<<<<<<<<<<< - * return self.size * self.view.itemsize - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":591 - * @property - * def nbytes(self): - * return self.size * self.view.itemsize # <<<<<<<<<<<<<< - * - * @property - */ - __Pyx_XDECREF(__pyx_r); - 
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":590 - * - * @property - * def nbytes(self): # <<<<<<<<<<<<<< - * return self.size * self.view.itemsize - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":594 - * - * @property - * def size(self): # <<<<<<<<<<<<<< - * if self._size is None: - * result = 1 - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_v_length = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - Py_ssize_t *__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":595 - * @property - * def size(self): - * if self._size is None: # <<<<<<<<<<<<<< - * result = 1 - * - */ - __pyx_t_1 = (__pyx_v_self->_size == Py_None); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":596 - * def size(self): - * if self._size is None: - * result = 1 # <<<<<<<<<<<<<< - * - * for length in self.view.shape[:self.view.ndim]: - */ - __Pyx_INCREF(__pyx_int_1); - __pyx_v_result = __pyx_int_1; - - /* "View.MemoryView":598 - * result = 1 - * - * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< - * result *= length - * - */ - __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); - for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { - __pyx_t_3 = __pyx_t_5; - __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); - __pyx_t_6 = 0; - - /* "View.MemoryView":599 - * - * for length in self.view.shape[:self.view.ndim]: - * result *= length # <<<<<<<<<<<<<< - * - * self._size = result - */ - __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, 
__pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); - __pyx_t_6 = 0; - } - - /* "View.MemoryView":601 - * result *= length - * - * self._size = result # <<<<<<<<<<<<<< - * - * return self._size - */ - __Pyx_INCREF(__pyx_v_result); - __Pyx_GIVEREF(__pyx_v_result); - __Pyx_GOTREF(__pyx_v_self->_size); - __Pyx_DECREF(__pyx_v_self->_size); - __pyx_v_self->_size = __pyx_v_result; - - /* "View.MemoryView":595 - * @property - * def size(self): - * if self._size is None: # <<<<<<<<<<<<<< - * result = 1 - * - */ - } - - /* "View.MemoryView":603 - * self._size = result - * - * return self._size # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->_size); - __pyx_r = __pyx_v_self->_size; - goto __pyx_L0; - - /* "View.MemoryView":594 - * - * @property - * def size(self): # <<<<<<<<<<<<<< - * if self._size is None: - * result = 1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":605 - * return self._size - * - * def __len__(self): # <<<<<<<<<<<<<< - * if self.view.ndim >= 1: - * return self.view.shape[0] - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":606 - * - * def __len__(self): - * if self.view.ndim >= 1: # <<<<<<<<<<<<<< - * return self.view.shape[0] - * - */ - __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":607 - * def __len__(self): - * if self.view.ndim >= 1: - * return self.view.shape[0] # <<<<<<<<<<<<<< - * - * return 0 - */ - __pyx_r = (__pyx_v_self->view.shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":606 - * - * def __len__(self): - * if self.view.ndim >= 1: # <<<<<<<<<<<<<< - * return self.view.shape[0] - * - */ - } - - /* "View.MemoryView":609 - * return self.view.shape[0] - * - * return 0 # <<<<<<<<<<<<<< - * - * def __repr__(self): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":605 - * return self._size - * - * def __len__(self): # <<<<<<<<<<<<<< - * if self.view.ndim >= 1: - * return self.view.shape[0] - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":611 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__, - * id(self)) - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - 
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":612 - * - * def __repr__(self): - * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":613 - * def __repr__(self): - * return "" % (self.base.__class__.__name__, - * id(self)) # <<<<<<<<<<<<<< - * - * def __str__(self): - */ - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - - /* "View.MemoryView":612 - * - * def __repr__(self): - * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":611 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__, - * id(self)) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":615 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__,) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ 
- __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__str__", 0); - - /* "View.MemoryView":616 - * - * def __str__(self): - * return "" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":615 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__,) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":619 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_c_contig", 0); - - /* "View.MemoryView":622 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - */ - __pyx_t_1 = 
__pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":623 - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) - * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< - * - * def is_f_contig(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":619 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":625 - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - * def is_f_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_f_contig", 0); - - /* "View.MemoryView":628 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":629 - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) - * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< - * - * def copy(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":625 - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - * def is_f_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - 
- /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":631 - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - * def copy(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_mslice; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy", 0); - - /* "View.MemoryView":633 - * def copy(self): - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &mslice) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); - - /* "View.MemoryView":635 - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - * - * slice_copy(self, &mslice) # <<<<<<<<<<<<<< - * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); - - /* "View.MemoryView":636 - * - * slice_copy(self, &mslice) - * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_C_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) - __pyx_v_mslice = __pyx_t_1; - - /* "View.MemoryView":641 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< - * - * def copy_fortran(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":631 - * return slice_is_contig(mslice[0], 'F', self.view.ndim) - * - * def copy(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice mslice - * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":643 - * return 
memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy_fortran", 0); - - /* "View.MemoryView":645 - * def copy_fortran(self): - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &src) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); - - /* "View.MemoryView":647 - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - * - * slice_copy(self, &src) # <<<<<<<<<<<<<< - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); - - /* "View.MemoryView":648 - * - * slice_copy(self, &src) - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_F_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) - __pyx_v_dst = __pyx_t_1; - - /* "View.MemoryView":653 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":643 - * return memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
*unused); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":657 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - -static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { - struct __pyx_memoryview_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); - - /* "View.MemoryView":658 - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< - * result.typeinfo = typeinfo - * return result - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_o); - __Pyx_GIVEREF(__pyx_v_o); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":659 - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_v_result->typeinfo = __pyx_v_typeinfo; - - /* "View.MemoryView":660 - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_check') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":657 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint 
dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":663 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("memoryview_check", 0); - - /* "View.MemoryView":664 - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): - * return isinstance(o, memoryview) # <<<<<<<<<<<<<< - * - * cdef tuple _unellipsify(object index, int ndim): - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); - __pyx_r = __pyx_t_1; - goto __pyx_L0; - - /* "View.MemoryView":663 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":666 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - -static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { - PyObject *__pyx_v_tup = NULL; - PyObject *__pyx_v_result = NULL; - int __pyx_v_have_slices; - int __pyx_v_seen_ellipsis; - CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; - PyObject *__pyx_v_item = NULL; - Py_ssize_t __pyx_v_nslices; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - Py_ssize_t __pyx_t_5; - PyObject *(*__pyx_t_6)(PyObject *); - PyObject *__pyx_t_7 = NULL; - Py_ssize_t __pyx_t_8; - int __pyx_t_9; - int __pyx_t_10; - PyObject *__pyx_t_11 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_unellipsify", 0); - - /* "View.MemoryView":671 - * full slices. - * """ - * if not isinstance(index, tuple): # <<<<<<<<<<<<<< - * tup = (index,) - * else: - */ - __pyx_t_1 = PyTuple_Check(__pyx_v_index); - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":672 - * """ - * if not isinstance(index, tuple): - * tup = (index,) # <<<<<<<<<<<<<< - * else: - * tup = index - */ - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_index); - __Pyx_GIVEREF(__pyx_v_index); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); - __pyx_v_tup = __pyx_t_3; - __pyx_t_3 = 0; - - /* "View.MemoryView":671 - * full slices. 
- * """ - * if not isinstance(index, tuple): # <<<<<<<<<<<<<< - * tup = (index,) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":674 - * tup = (index,) - * else: - * tup = index # <<<<<<<<<<<<<< - * - * result = [] - */ - /*else*/ { - __Pyx_INCREF(__pyx_v_index); - __pyx_v_tup = __pyx_v_index; - } - __pyx_L3:; - - /* "View.MemoryView":676 - * tup = index - * - * result = [] # <<<<<<<<<<<<<< - * have_slices = False - * seen_ellipsis = False - */ - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_result = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":677 - * - * result = [] - * have_slices = False # <<<<<<<<<<<<<< - * seen_ellipsis = False - * for idx, item in enumerate(tup): - */ - __pyx_v_have_slices = 0; - - /* "View.MemoryView":678 - * result = [] - * have_slices = False - * seen_ellipsis = False # <<<<<<<<<<<<<< - * for idx, item in enumerate(tup): - * if item is Ellipsis: - */ - __pyx_v_seen_ellipsis = 0; - - /* "View.MemoryView":679 - * have_slices = False - * seen_ellipsis = False - * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - __Pyx_INCREF(__pyx_int_0); - __pyx_t_3 = __pyx_int_0; - if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { - __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; - __pyx_t_6 = NULL; - } else { - __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_6)) { - if (likely(PyList_CheckExact(__pyx_t_4))) { - if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } else { - if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } - } else { - __pyx_t_7 = __pyx_t_6(__pyx_t_4); - if (unlikely(!__pyx_t_7)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 679, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_7); - } - __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_INCREF(__pyx_t_3); - __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); - __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = __pyx_t_7; - __pyx_t_7 = 0; - - /* "View.MemoryView":680 - * seen_ellipsis = False - * for idx, item in enumerate(tup): - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * 
result.extend([slice(None)] * (ndim - len(tup) + 1)) - */ - __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); - __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":681 - * for idx, item in enumerate(tup): - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True - */ - __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":682 - * if item is Ellipsis: - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * else: - */ - __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) - __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); - } - } - __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "View.MemoryView":683 - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True # <<<<<<<<<<<<<< - * else: - * result.append(slice(None)) - */ - __pyx_v_seen_ellipsis = 1; - - /* "View.MemoryView":681 - * for idx, item in enumerate(tup): - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True - */ - goto __pyx_L7; - } - - /* "View.MemoryView":685 - * seen_ellipsis = True - * else: - * result.append(slice(None)) # <<<<<<<<<<<<<< - * have_slices = True - * else: - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":686 - * else: - * result.append(slice(None)) - * have_slices = True # <<<<<<<<<<<<<< - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): - */ - __pyx_v_have_slices = 1; - - /* "View.MemoryView":680 - * seen_ellipsis = False - * for idx, item in enumerate(tup): - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - */ - goto __pyx_L6; - } - - /* "View.MemoryView":688 - * have_slices = True - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - */ - /*else*/ { - __pyx_t_2 = PySlice_Check(__pyx_v_item); - __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); - if (__pyx_t_10) { - } else { - __pyx_t_1 = __pyx_t_10; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); - __pyx_t_1 = __pyx_t_10; - __pyx_L9_bool_binop_done:; - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":689 - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): - * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< - * - * have_slices = have_slices or isinstance(item, slice) - */ - __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject 
*)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_Raise(__pyx_t_11, 0, 0, 0); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __PYX_ERR(1, 689, __pyx_L1_error) - - /* "View.MemoryView":688 - * have_slices = True - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - */ - } - - /* "View.MemoryView":691 - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< - * result.append(item) - * - */ - __pyx_t_10 = (__pyx_v_have_slices != 0); - if (!__pyx_t_10) { - } else { - __pyx_t_1 = __pyx_t_10; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_10 = PySlice_Check(__pyx_v_item); - __pyx_t_2 = (__pyx_t_10 != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L11_bool_binop_done:; - __pyx_v_have_slices = __pyx_t_1; - - /* "View.MemoryView":692 - * - * have_slices = have_slices or isinstance(item, slice) - * result.append(item) # <<<<<<<<<<<<<< - * - * nslices = ndim - len(result) - */ - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) - } - __pyx_L6:; - - /* "View.MemoryView":679 - * have_slices = False - * seen_ellipsis = False - * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":694 - * result.append(item) - * - * nslices = ndim - len(result) # <<<<<<<<<<<<<< - * if nslices: - * result.extend([slice(None)] * nslices) - */ - __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) - __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); - - /* "View.MemoryView":695 - * - * nslices = ndim - len(result) - * if nslices: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * nslices) - * - */ - __pyx_t_1 = (__pyx_v_nslices != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":696 - * nslices = ndim - len(result) - * if nslices: - * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< - * - * return have_slices or nslices, tuple(result) - */ - __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); - } - } - __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":695 - * - * nslices = ndim - len(result) - * if nslices: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * nslices) - * - */ - } - - /* "View.MemoryView":698 - * result.extend([slice(None)] * nslices) - * - * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - */ - __Pyx_XDECREF(__pyx_r); - if (!__pyx_v_have_slices) { - } else { - __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L14_bool_binop_done; - } - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_L14_bool_binop_done:; - __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_r = ((PyObject*)__pyx_t_11); - __pyx_t_11 = 0; - goto __pyx_L0; - - /* "View.MemoryView":666 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_tup); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_XDECREF(__pyx_v_item); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - -static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); - - /* "View.MemoryView":701 - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") - */ - 
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); - for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { - __pyx_t_1 = __pyx_t_3; - __pyx_v_suboffset = (__pyx_t_1[0]); - - /* "View.MemoryView":702 - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError("Indirect dimensions not supported") - * - */ - __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(1, 703, __pyx_L1_error) - - /* "View.MemoryView":702 - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError("Indirect dimensions not supported") - * - */ - } - } - - /* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":710 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { - int __pyx_v_new_ndim; - int __pyx_v_suboffset_dim; - int __pyx_v_dim; - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - __Pyx_memviewslice *__pyx_v_p_src; - struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; - __Pyx_memviewslice *__pyx_v_p_dst; - int *__pyx_v_p_suboffset_dim; - Py_ssize_t __pyx_v_start; - Py_ssize_t __pyx_v_stop; - Py_ssize_t __pyx_v_step; - int __pyx_v_have_start; - int __pyx_v_have_stop; - int __pyx_v_have_step; - PyObject *__pyx_v_index = NULL; - struct __pyx_memoryview_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - struct __pyx_memoryview_obj *__pyx_t_4; - char *__pyx_t_5; - int __pyx_t_6; - Py_ssize_t __pyx_t_7; - PyObject *(*__pyx_t_8)(PyObject *); - PyObject *__pyx_t_9 = NULL; - Py_ssize_t __pyx_t_10; - int __pyx_t_11; - Py_ssize_t __pyx_t_12; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memview_slice", 0); - - /* "View.MemoryView":711 - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): - * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< - * cdef bint negative_step - * cdef __Pyx_memviewslice src, dst - */ - __pyx_v_new_ndim = 0; - __pyx_v_suboffset_dim = -1; - - /* 
"View.MemoryView":718 - * - * - * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< - * - * cdef _memoryviewslice memviewsliceobj - */ - (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); - - /* "View.MemoryView":722 - * cdef _memoryviewslice memviewsliceobj - * - * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(!Py_OptimizeFlag)) { - if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { - PyErr_SetNone(PyExc_AssertionError); - __PYX_ERR(1, 722, __pyx_L1_error) - } - } - #endif - - /* "View.MemoryView":724 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":725 - * - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview # <<<<<<<<<<<<<< - * p_src = &memviewsliceobj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_3); - __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":726 - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, &src) - */ - __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); - - /* "View.MemoryView":724 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - goto __pyx_L3; - } - - /* "View.MemoryView":728 - * p_src = &memviewsliceobj.from_slice - * else: - * slice_copy(memview, &src) # <<<<<<<<<<<<<< - * p_src = &src - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); - - /* "View.MemoryView":729 - * else: - * slice_copy(memview, &src) - * p_src = &src # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_p_src = (&__pyx_v_src); - } - __pyx_L3:; - - /* "View.MemoryView":735 - * - * - * dst.memview = p_src.memview # <<<<<<<<<<<<<< - * dst.data = p_src.data - * - */ - __pyx_t_4 = __pyx_v_p_src->memview; - __pyx_v_dst.memview = __pyx_t_4; - - /* "View.MemoryView":736 - * - * dst.memview = p_src.memview - * dst.data = p_src.data # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __pyx_v_p_src->data; - __pyx_v_dst.data = __pyx_t_5; - - /* "View.MemoryView":741 - * - * - * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< - * cdef int *p_suboffset_dim = &suboffset_dim - * cdef Py_ssize_t start, stop, step - */ - __pyx_v_p_dst = (&__pyx_v_dst); - - /* "View.MemoryView":742 - * - * cdef __Pyx_memviewslice *p_dst = &dst - * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< - * cdef Py_ssize_t start, stop, step - * cdef bint have_start, have_stop, have_step - */ - __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); - - /* "View.MemoryView":746 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * slice_memviewslice( - */ - __pyx_t_6 = 0; - if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { - 
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; - __pyx_t_8 = NULL; - } else { - __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_8)) { - if (likely(PyList_CheckExact(__pyx_t_3))) { - if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) - #else - __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - } else { - if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) - #else - __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - } - } else { - __pyx_t_9 = __pyx_t_8(__pyx_t_3); - if (unlikely(!__pyx_t_9)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 746, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_9); - } - __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); - __pyx_t_9 = 0; - __pyx_v_dim = __pyx_t_6; - __pyx_t_6 = (__pyx_t_6 + 1); - - /* "View.MemoryView":747 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * slice_memviewslice( - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":751 - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< - * 0, 0, 0, # have_{start,stop,step} - * False) - */ - __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) - - /* "View.MemoryView":748 - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) - - /* "View.MemoryView":747 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * slice_memviewslice( - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - goto __pyx_L6; - } - - /* "View.MemoryView":754 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - __pyx_t_2 = (__pyx_v_index == Py_None); - __pyx_t_1 = 
(__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":755 - * False) - * elif index is None: - * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - */ - (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; - - /* "View.MemoryView":756 - * elif index is None: - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 - */ - (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; - - /* "View.MemoryView":757 - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< - * new_ndim += 1 - * else: - */ - (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; - - /* "View.MemoryView":758 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 # <<<<<<<<<<<<<< - * else: - * start = index.start or 0 - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - - /* "View.MemoryView":754 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - goto __pyx_L6; - } - - /* "View.MemoryView":760 - * new_ndim += 1 - * else: - * start = index.start or 0 # <<<<<<<<<<<<<< - * stop = index.stop or 0 - * step = index.step or 0 - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L7_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L7_bool_binop_done:; - __pyx_v_start = __pyx_t_10; - - /* "View.MemoryView":761 - * else: - * start = index.start or 0 - * stop = index.stop or 0 # <<<<<<<<<<<<<< - * step = index.step or 0 - * - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L9_bool_binop_done:; - __pyx_v_stop = __pyx_t_10; - - /* "View.MemoryView":762 - * start = index.start or 0 - * stop = index.stop or 0 - * step = index.step or 0 # <<<<<<<<<<<<<< - * - * have_start = index.start is not None - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L11_bool_binop_done:; - __pyx_v_step = __pyx_t_10; - - /* "View.MemoryView":764 - * step = index.step or 0 - * - * have_start = index.start is not None # <<<<<<<<<<<<<< - * have_stop = index.stop is not None - * have_step = index.step is not None - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_start = __pyx_t_1; - - /* "View.MemoryView":765 - * - * have_start = index.start is not None - * have_stop = index.stop is not None # <<<<<<<<<<<<<< - * have_step = index.step is not None - * - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_stop = __pyx_t_1; - - /* "View.MemoryView":766 - * have_start = index.start is not None - * have_stop = index.stop is not None - * have_step = index.step is not None # <<<<<<<<<<<<<< - * - * slice_memviewslice( - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_step = __pyx_t_1; - - /* "View.MemoryView":768 - * have_step = index.step is not None - * - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) - - /* "View.MemoryView":774 - * have_start, have_stop, have_step, - * True) - * new_ndim += 1 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - } - __pyx_L6:; - - /* "View.MemoryView":746 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * slice_memviewslice( - */ - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":776 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":777 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - - /* "View.MemoryView":778 - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< - * memviewsliceobj.to_dtype_func, - * memview.dtype_is_object) - */ - if (unlikely(!__pyx_v_memviewsliceobj)) 
{ __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } - - /* "View.MemoryView":779 - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * else: - */ - if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } - - /* "View.MemoryView":777 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":776 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - } - - /* "View.MemoryView":782 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - /*else*/ { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - - /* "View.MemoryView":783 - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - - /* "View.MemoryView":782 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":710 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":807 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int 
__pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { - Py_ssize_t __pyx_v_new_shape; - int __pyx_v_negative_step; - int __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":827 - * cdef bint negative_step - * - * if not is_slice: # <<<<<<<<<<<<<< - * - * if start < 0: - */ - __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":829 - * if not is_slice: - * - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if not 0 <= start < shape: - */ - __pyx_t_1 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":830 - * - * if start < 0: - * start += shape # <<<<<<<<<<<<<< - * if not 0 <= start < shape: - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - */ - __pyx_v_start = (__pyx_v_start + __pyx_v_shape); - - /* "View.MemoryView":829 - * if not is_slice: - * - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if not 0 <= start < shape: - */ - } - - /* "View.MemoryView":831 - * if start < 0: - * start += shape - * if not 0 <= start < shape: # <<<<<<<<<<<<<< - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - * else: - */ - __pyx_t_1 = (0 <= __pyx_v_start); - if (__pyx_t_1) { - __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); - } - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":832 - * start += shape - * if not 0 <= start < shape: - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< - * else: - * - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) - - /* "View.MemoryView":831 - * if start < 0: - * start += shape - * if not 0 <= start < shape: # <<<<<<<<<<<<<< - * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) - * else: - */ - } - - /* "View.MemoryView":827 - * cdef bint negative_step - * - * if not is_slice: # <<<<<<<<<<<<<< - * - * if start < 0: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":835 - * else: - * - * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< - * - * if have_step and step == 0: - */ - /*else*/ { - __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); - if (__pyx_t_1) { - } else { - __pyx_t_2 = __pyx_t_1; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_1 = ((__pyx_v_step < 0) != 0); - __pyx_t_2 = __pyx_t_1; - __pyx_L6_bool_binop_done:; - __pyx_v_negative_step = __pyx_t_2; - - /* "View.MemoryView":837 - * negative_step = have_step != 0 and step < 0 - * - * if have_step and step == 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) - * - */ - __pyx_t_1 = (__pyx_v_have_step != 0); - if (__pyx_t_1) { - } else { - __pyx_t_2 = __pyx_t_1; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_1 = ((__pyx_v_step == 0) != 0); - __pyx_t_2 = __pyx_t_1; - __pyx_L9_bool_binop_done:; - if (__pyx_t_2) { - - /* "View.MemoryView":838 - * - * if have_step and step == 0: - * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) - - /* "View.MemoryView":837 - * negative_step = have_step != 0 and step < 0 - * - * if have_step and step == 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) - * - */ - } - - /* "View.MemoryView":841 - * - * - * if have_start: # <<<<<<<<<<<<<< - * if start < 0: - * start += shape - */ - __pyx_t_2 = (__pyx_v_have_start != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":842 - * - * if have_start: - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if start < 0: - */ - __pyx_t_2 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":843 - * if have_start: - * if start < 0: - * start += shape # <<<<<<<<<<<<<< - * if start < 0: - * start = 0 - */ - __pyx_v_start = (__pyx_v_start + __pyx_v_shape); - - /* "View.MemoryView":844 - * if start < 0: - * start += shape - * if start < 0: # <<<<<<<<<<<<<< - * start = 0 - * elif start >= shape: - */ - __pyx_t_2 = ((__pyx_v_start < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":845 - * start += shape - * if start < 0: - * start = 0 # <<<<<<<<<<<<<< - * elif start >= shape: - * if negative_step: - */ - __pyx_v_start = 0; - - /* "View.MemoryView":844 - * if start < 0: - * start += shape - * if start < 0: # <<<<<<<<<<<<<< - * start = 0 - * elif start >= shape: - */ - } - - /* "View.MemoryView":842 - * - * if have_start: - * if start < 0: # <<<<<<<<<<<<<< - * start += shape - * if start < 0: - */ - goto __pyx_L12; - } - - /* "View.MemoryView":846 - * if start < 0: - * start = 0 - * elif start >= shape: # <<<<<<<<<<<<<< - * if negative_step: - * start = shape - 1 - */ - __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":847 - * start = 0 - * elif start >= shape: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":848 - * elif start >= shape: - * if negative_step: - * start = shape - 1 # <<<<<<<<<<<<<< - * else: - * start = shape - */ - __pyx_v_start = (__pyx_v_shape - 1); - - /* "View.MemoryView":847 - * start = 0 - * elif start >= shape: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - goto __pyx_L14; - } - - /* "View.MemoryView":850 - * start = shape - 1 - * else: - * start = shape # <<<<<<<<<<<<<< - * else: - * if negative_step: - */ - /*else*/ { - __pyx_v_start = __pyx_v_shape; - } - __pyx_L14:; - - /* "View.MemoryView":846 - * if start < 0: - * start = 0 - * elif start >= shape: # <<<<<<<<<<<<<< - * if negative_step: - * start = shape - 1 - */ - } - __pyx_L12:; - - /* "View.MemoryView":841 - * - * - * if have_start: # <<<<<<<<<<<<<< - * if start < 0: - * start += shape - */ - goto __pyx_L11; - } - - /* "View.MemoryView":852 - * start = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - /*else*/ { - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":853 - * else: - * if negative_step: - * start = shape - 1 # <<<<<<<<<<<<<< - * else: - * start = 0 - */ - __pyx_v_start = (__pyx_v_shape - 1); - - /* "View.MemoryView":852 - * start = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * start = shape - 1 - * else: - */ - goto __pyx_L15; - } - - /* "View.MemoryView":855 - * start = shape - 1 - * else: - * start = 0 # <<<<<<<<<<<<<< - * - * if have_stop: - */ - /*else*/ { - __pyx_v_start = 0; - } - __pyx_L15:; - } - __pyx_L11:; - - /* "View.MemoryView":857 - * start = 0 - * - * if have_stop: # <<<<<<<<<<<<<< - * if stop < 0: - * stop += shape - */ - __pyx_t_2 = (__pyx_v_have_stop != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":858 - * - * if have_stop: - * if stop < 0: # 
<<<<<<<<<<<<<< - * stop += shape - * if stop < 0: - */ - __pyx_t_2 = ((__pyx_v_stop < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":859 - * if have_stop: - * if stop < 0: - * stop += shape # <<<<<<<<<<<<<< - * if stop < 0: - * stop = 0 - */ - __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); - - /* "View.MemoryView":860 - * if stop < 0: - * stop += shape - * if stop < 0: # <<<<<<<<<<<<<< - * stop = 0 - * elif stop > shape: - */ - __pyx_t_2 = ((__pyx_v_stop < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":861 - * stop += shape - * if stop < 0: - * stop = 0 # <<<<<<<<<<<<<< - * elif stop > shape: - * stop = shape - */ - __pyx_v_stop = 0; - - /* "View.MemoryView":860 - * if stop < 0: - * stop += shape - * if stop < 0: # <<<<<<<<<<<<<< - * stop = 0 - * elif stop > shape: - */ - } - - /* "View.MemoryView":858 - * - * if have_stop: - * if stop < 0: # <<<<<<<<<<<<<< - * stop += shape - * if stop < 0: - */ - goto __pyx_L17; - } - - /* "View.MemoryView":862 - * if stop < 0: - * stop = 0 - * elif stop > shape: # <<<<<<<<<<<<<< - * stop = shape - * else: - */ - __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":863 - * stop = 0 - * elif stop > shape: - * stop = shape # <<<<<<<<<<<<<< - * else: - * if negative_step: - */ - __pyx_v_stop = __pyx_v_shape; - - /* "View.MemoryView":862 - * if stop < 0: - * stop = 0 - * elif stop > shape: # <<<<<<<<<<<<<< - * stop = shape - * else: - */ - } - __pyx_L17:; - - /* "View.MemoryView":857 - * start = 0 - * - * if have_stop: # <<<<<<<<<<<<<< - * if stop < 0: - * stop += shape - */ - goto __pyx_L16; - } - - /* "View.MemoryView":865 - * stop = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * stop = -1 - * else: - */ - /*else*/ { - __pyx_t_2 = (__pyx_v_negative_step != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":866 - * else: - * if negative_step: - * stop = -1 # <<<<<<<<<<<<<< - * else: - * stop = shape - */ - __pyx_v_stop = -1L; - - /* "View.MemoryView":865 - * stop = shape - * else: - * if negative_step: # <<<<<<<<<<<<<< - * stop = -1 - * else: - */ - goto __pyx_L19; - } - - /* "View.MemoryView":868 - * stop = -1 - * else: - * stop = shape # <<<<<<<<<<<<<< - * - * if not have_step: - */ - /*else*/ { - __pyx_v_stop = __pyx_v_shape; - } - __pyx_L19:; - } - __pyx_L16:; - - /* "View.MemoryView":870 - * stop = shape - * - * if not have_step: # <<<<<<<<<<<<<< - * step = 1 - * - */ - __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":871 - * - * if not have_step: - * step = 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_step = 1; - - /* "View.MemoryView":870 - * stop = shape - * - * if not have_step: # <<<<<<<<<<<<<< - * step = 1 - * - */ - } - - /* "View.MemoryView":875 - * - * with cython.cdivision(True): - * new_shape = (stop - start) // step # <<<<<<<<<<<<<< - * - * if (stop - start) - step * new_shape: - */ - __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); - - /* "View.MemoryView":877 - * new_shape = (stop - start) // step - * - * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< - * new_shape += 1 - * - */ - __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":878 - * - * if (stop - start) - step * new_shape: - * new_shape += 1 # <<<<<<<<<<<<<< - * - * if new_shape < 0: - */ - __pyx_v_new_shape = (__pyx_v_new_shape + 1); - - /* "View.MemoryView":877 - * new_shape = (stop - start) // step - * - * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< - * 
new_shape += 1 - * - */ - } - - /* "View.MemoryView":880 - * new_shape += 1 - * - * if new_shape < 0: # <<<<<<<<<<<<<< - * new_shape = 0 - * - */ - __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":881 - * - * if new_shape < 0: - * new_shape = 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_new_shape = 0; - - /* "View.MemoryView":880 - * new_shape += 1 - * - * if new_shape < 0: # <<<<<<<<<<<<<< - * new_shape = 0 - * - */ - } - - /* "View.MemoryView":884 - * - * - * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< - * dst.shape[new_ndim] = new_shape - * dst.suboffsets[new_ndim] = suboffset - */ - (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); - - /* "View.MemoryView":885 - * - * dst.strides[new_ndim] = stride * step - * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< - * dst.suboffsets[new_ndim] = suboffset - * - */ - (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; - - /* "View.MemoryView":886 - * dst.strides[new_ndim] = stride * step - * dst.shape[new_ndim] = new_shape - * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< - * - * - */ - (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; - } - __pyx_L3:; - - /* "View.MemoryView":889 - * - * - * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< - * dst.data += start * stride - * else: - */ - __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":890 - * - * if suboffset_dim[0] < 0: - * dst.data += start * stride # <<<<<<<<<<<<<< - * else: - * dst.suboffsets[suboffset_dim[0]] += start * stride - */ - __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); - - /* "View.MemoryView":889 - * - * - * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< - * dst.data += start * stride - * else: - */ - goto __pyx_L23; - } - - /* "View.MemoryView":892 - * dst.data += start * stride - * else: - * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< - * - * if suboffset >= 0: - */ - /*else*/ { - __pyx_t_3 = (__pyx_v_suboffset_dim[0]); - (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); - } - __pyx_L23:; - - /* "View.MemoryView":894 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":895 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * dst.data = ( dst.data)[0] + suboffset - */ - __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":896 - * if suboffset >= 0: - * if not is_slice: - * if new_ndim == 0: # <<<<<<<<<<<<<< - * dst.data = ( dst.data)[0] + suboffset - * else: - */ - __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":897 - * if not is_slice: - * if new_ndim == 0: - * dst.data = ( dst.data)[0] + suboffset # <<<<<<<<<<<<<< - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " - */ - __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":896 - * if suboffset >= 0: - * if not is_slice: - * if new_ndim == 0: # <<<<<<<<<<<<<< - * dst.data = ( dst.data)[0] + suboffset - * else: - */ - goto __pyx_L26; - } - - /* "View.MemoryView":899 - * dst.data = ( dst.data)[0] + suboffset - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< - * 
"must be indexed and not sliced", dim) - * else: - */ - /*else*/ { - - /* "View.MemoryView":900 - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " - * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< - * else: - * suboffset_dim[0] = new_ndim - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) - } - __pyx_L26:; - - /* "View.MemoryView":895 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * dst.data = ( dst.data)[0] + suboffset - */ - goto __pyx_L25; - } - - /* "View.MemoryView":902 - * "must be indexed and not sliced", dim) - * else: - * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< - * - * return 0 - */ - /*else*/ { - (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; - } - __pyx_L25:; - - /* "View.MemoryView":894 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - } - - /* "View.MemoryView":904 - * suboffset_dim[0] = new_ndim - * - * return 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":807 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = -1; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":910 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - -static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_suboffset; - Py_ssize_t __pyx_v_itemsize; - char *__pyx_v_resultp; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("pybuffer_index", 0); - - /* "View.MemoryView":912 - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< - * cdef Py_ssize_t itemsize = view.itemsize - * cdef char *resultp - */ - __pyx_v_suboffset = -1L; - - /* "View.MemoryView":913 - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< - * cdef char *resultp - * - */ - __pyx_t_1 = __pyx_v_view->itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":916 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len / itemsize - * stride = itemsize - */ - __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":917 - * - * if view.ndim == 
0: - * shape = view.len / itemsize # <<<<<<<<<<<<<< - * stride = itemsize - * else: - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 917, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(1, 917, __pyx_L1_error) - } - __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); - - /* "View.MemoryView":918 - * if view.ndim == 0: - * shape = view.len / itemsize - * stride = itemsize # <<<<<<<<<<<<<< - * else: - * shape = view.shape[dim] - */ - __pyx_v_stride = __pyx_v_itemsize; - - /* "View.MemoryView":916 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len / itemsize - * stride = itemsize - */ - goto __pyx_L3; - } - - /* "View.MemoryView":920 - * stride = itemsize - * else: - * shape = view.shape[dim] # <<<<<<<<<<<<<< - * stride = view.strides[dim] - * if view.suboffsets != NULL: - */ - /*else*/ { - __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); - - /* "View.MemoryView":921 - * else: - * shape = view.shape[dim] - * stride = view.strides[dim] # <<<<<<<<<<<<<< - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] - */ - __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); - - /* "View.MemoryView":922 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":923 - * stride = view.strides[dim] - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< - * - * if index < 0: - */ - __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); - - /* "View.MemoryView":922 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - } - } - __pyx_L3:; - - /* "View.MemoryView":925 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - __pyx_t_2 = ((__pyx_v_index < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":926 - * - * if index < 0: - * index += view.shape[dim] # <<<<<<<<<<<<<< - * if index < 0: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - */ - __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); - - /* "View.MemoryView":927 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - __pyx_t_2 = ((__pyx_v_index < 0) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":928 - * index += view.shape[dim] - * if index < 0: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< - * - * if index >= shape: - */ - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 928, __pyx_L1_error) - - /* "View.MemoryView":927 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - } - - /* "View.MemoryView":925 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - } - - /* "View.MemoryView":930 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":931 - * - * if index >= shape: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< - * - * resultp = bufp + index * stride - */ - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 931, __pyx_L1_error) - - /* "View.MemoryView":930 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - } - - /* "View.MemoryView":933 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * resultp = bufp + index * stride # <<<<<<<<<<<<<< - * if suboffset >= 0: - * resultp = ( resultp)[0] + suboffset - */ - __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); - - /* "View.MemoryView":934 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = ( resultp)[0] + suboffset - * - */ - __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":935 - * resultp = bufp + index * stride - * if suboffset >= 0: - * resultp = ( resultp)[0] + suboffset # <<<<<<<<<<<<<< - * - * return resultp - */ - __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":934 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = ( resultp)[0] + suboffset - * - */ - } - - /* "View.MemoryView":937 - * resultp = ( resultp)[0] + suboffset - * - * return resultp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_resultp; - goto __pyx_L0; - - /* "View.MemoryView":910 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - 
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":943 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - -static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { - int __pyx_v_ndim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_r; - int __pyx_t_1; - Py_ssize_t *__pyx_t_2; - long __pyx_t_3; - long __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":944 - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: - * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< - * - * cdef Py_ssize_t *shape = memslice.shape - */ - __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; - __pyx_v_ndim = __pyx_t_1; - - /* "View.MemoryView":946 - * cdef int ndim = memslice.memview.view.ndim - * - * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< - * cdef Py_ssize_t *strides = memslice.strides - * - */ - __pyx_t_2 = __pyx_v_memslice->shape; - __pyx_v_shape = __pyx_t_2; - - /* "View.MemoryView":947 - * - * cdef Py_ssize_t *shape = memslice.shape - * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = __pyx_v_memslice->strides; - __pyx_v_strides = __pyx_t_2; - - /* "View.MemoryView":951 - * - * cdef int i, j - * for i in range(ndim / 2): # <<<<<<<<<<<<<< - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - */ - __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":952 - * cdef int i, j - * for i in range(ndim / 2): - * j = ndim - 1 - i # <<<<<<<<<<<<<< - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] - */ - __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); - - /* "View.MemoryView":953 - * for i in range(ndim / 2): - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< - * shape[i], shape[j] = shape[j], shape[i] - * - */ - __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); - __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); - (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; - (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; - - /* "View.MemoryView":954 - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - */ - __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); - __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); - (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; - (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; - - /* "View.MemoryView":956 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); - if (!__pyx_t_8) { - } else { - __pyx_t_7 = __pyx_t_8; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_8 = 
(((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); - __pyx_t_7 = __pyx_t_8; - __pyx_L6_bool_binop_done:; - if (__pyx_t_7) { - - /* "View.MemoryView":957 - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< - * - * return 1 - */ - __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) - - /* "View.MemoryView":956 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - } - } - - /* "View.MemoryView":959 - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - * return 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 1; - goto __pyx_L0; - - /* "View.MemoryView":943 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":976 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - */ - -/* Python wrapper */ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":977 - * - * def __dealloc__(self): - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); - - /* "View.MemoryView":976 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":979 - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
__Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":980 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":981 - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) # <<<<<<<<<<<<<< - * else: - * return memoryview.convert_item_to_object(self, itemp) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":980 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - } - - /* "View.MemoryView":983 - * return self.to_object_func(itemp) - * else: - * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":979 - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":985 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":986 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":987 - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< - * else: - * memoryview.assign_item_from_object(self, itemp, value) - */ - __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) - - /* "View.MemoryView":986 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if 
self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":989 - * self.to_dtype_func(itemp, value) - * else: - * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< - * - * @property - */ - /*else*/ { - __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":985 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":992 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":993 - * @property - * def base(self): - * return self.from_object # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->from_object); - __pyx_r = __pyx_v_self->from_object; - goto __pyx_L0; - - /* "View.MemoryView":992 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject 
*__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code 
*/ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { - struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_v_length = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - __Pyx_TypeInfo *__pyx_t_4; - Py_buffer __pyx_t_5; - Py_ssize_t *__pyx_t_6; - Py_ssize_t *__pyx_t_7; - Py_ssize_t *__pyx_t_8; - Py_ssize_t __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_fromslice", 0); - - /* "View.MemoryView":1007 - * cdef _memoryviewslice result - * - * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1008 - * - * if memviewslice.memview == Py_None: - * return None # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - - /* "View.MemoryView":1007 - * cdef _memoryviewslice result - * - * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< - * return None - * - */ - } - - /* "View.MemoryView":1013 - * - * - * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< - * - * result.from_slice = memviewslice - */ - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1015 - * result = _memoryviewslice(None, 0, dtype_is_object) - * - * result.from_slice = memviewslice # <<<<<<<<<<<<<< - * __PYX_INC_MEMVIEW(&memviewslice, 1) - * - */ - __pyx_v_result->from_slice = __pyx_v_memviewslice; - - /* "View.MemoryView":1016 - * - * result.from_slice = memviewslice - * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< - * - * result.from_object = ( memviewslice.memview).base - */ - __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); - - /* "View.MemoryView":1018 - * __PYX_INC_MEMVIEW(&memviewslice, 1) - * - * result.from_object = ( memviewslice.memview).base # <<<<<<<<<<<<<< - * result.typeinfo = 
memviewslice.memview.typeinfo - * - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_2); - __Pyx_GOTREF(__pyx_v_result->from_object); - __Pyx_DECREF(__pyx_v_result->from_object); - __pyx_v_result->from_object = __pyx_t_2; - __pyx_t_2 = 0; - - /* "View.MemoryView":1019 - * - * result.from_object = ( memviewslice.memview).base - * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< - * - * result.view = memviewslice.memview.view - */ - __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; - __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; - - /* "View.MemoryView":1021 - * result.typeinfo = memviewslice.memview.typeinfo - * - * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< - * result.view.buf = memviewslice.data - * result.view.ndim = ndim - */ - __pyx_t_5 = __pyx_v_memviewslice.memview->view; - __pyx_v_result->__pyx_base.view = __pyx_t_5; - - /* "View.MemoryView":1022 - * - * result.view = memviewslice.memview.view - * result.view.buf = memviewslice.data # <<<<<<<<<<<<<< - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None - */ - __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); - - /* "View.MemoryView":1023 - * result.view = memviewslice.memview.view - * result.view.buf = memviewslice.data - * result.view.ndim = ndim # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &result.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; - - /* "View.MemoryView":1024 - * result.view.buf = memviewslice.data - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; - - /* "View.MemoryView":1025 - * result.view.ndim = ndim - * (<__pyx_buffer *> &result.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * if (memviewslice.memview).flags & PyBUF_WRITABLE: - */ - Py_INCREF(Py_None); - - /* "View.MemoryView":1027 - * Py_INCREF(Py_None) - * - * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< - * result.flags = PyBUF_RECORDS - * else: - */ - __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1028 - * - * if (memviewslice.memview).flags & PyBUF_WRITABLE: - * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< - * else: - * result.flags = PyBUF_RECORDS_RO - */ - __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; - - /* "View.MemoryView":1027 - * Py_INCREF(Py_None) - * - * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< - * result.flags = PyBUF_RECORDS - * else: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":1030 - * result.flags = PyBUF_RECORDS - * else: - * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< - * - * result.view.shape = result.from_slice.shape - */ - /*else*/ { - __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; - } - __pyx_L4:; - - /* "View.MemoryView":1032 - * result.flags = PyBUF_RECORDS_RO - * - * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<< - * result.view.strides = result.from_slice.strides - * - */ - __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); - - /* "View.MemoryView":1033 - * - * result.view.shape = result.from_slice.shape - * result.view.strides = 
result.from_slice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); - - /* "View.MemoryView":1036 - * - * - * result.view.suboffsets = NULL # <<<<<<<<<<<<<< - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: - */ - __pyx_v_result->__pyx_base.view.suboffsets = NULL; - - /* "View.MemoryView":1037 - * - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * result.view.suboffsets = result.from_slice.suboffsets - */ - __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); - for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { - __pyx_t_6 = __pyx_t_8; - __pyx_v_suboffset = (__pyx_t_6[0]); - - /* "View.MemoryView":1038 - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * result.view.suboffsets = result.from_slice.suboffsets - * break - */ - __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1039 - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: - * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); - - /* "View.MemoryView":1040 - * if suboffset >= 0: - * result.view.suboffsets = result.from_slice.suboffsets - * break # <<<<<<<<<<<<<< - * - * result.view.len = result.view.itemsize - */ - goto __pyx_L6_break; - - /* "View.MemoryView":1038 - * result.view.suboffsets = NULL - * for suboffset in result.from_slice.suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * result.view.suboffsets = result.from_slice.suboffsets - * break - */ - } - } - __pyx_L6_break:; - - /* "View.MemoryView":1042 - * break - * - * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< - * for length in result.view.shape[:ndim]: - * result.view.len *= length - */ - __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; - __pyx_v_result->__pyx_base.view.len = __pyx_t_9; - - /* "View.MemoryView":1043 - * - * result.view.len = result.view.itemsize - * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< - * result.view.len *= length - * - */ - __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); - for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { - __pyx_t_6 = __pyx_t_8; - __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":1044 - * result.view.len = result.view.itemsize - * for length in result.view.shape[:ndim]: - * result.view.len *= length # <<<<<<<<<<<<<< - * - * result.to_object_func = to_object_func - */ - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - 
__pyx_v_result->__pyx_base.view.len = __pyx_t_9; - } - - /* "View.MemoryView":1046 - * result.view.len *= length - * - * result.to_object_func = to_object_func # <<<<<<<<<<<<<< - * result.to_dtype_func = to_dtype_func - * - */ - __pyx_v_result->to_object_func = __pyx_v_to_object_func; - - /* "View.MemoryView":1047 - * - * result.to_object_func = to_object_func - * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; - - /* "View.MemoryView":1049 - * result.to_dtype_func = to_dtype_func - * - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { - struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; - __Pyx_memviewslice *__pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_slice_from_memview", 0); - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1056 - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): - * obj = memview # <<<<<<<<<<<<<< - * return &obj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_3); - __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":1057 - * if isinstance(memview, _memoryviewslice): - * obj = memview - * return &obj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, mslice) - */ - __pyx_r = (&__pyx_v_obj->from_slice); - goto __pyx_L0; - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - } - - /* "View.MemoryView":1059 - * return 
&obj.from_slice - * else: - * slice_copy(memview, mslice) # <<<<<<<<<<<<<< - * return mslice - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); - - /* "View.MemoryView":1060 - * else: - * slice_copy(memview, mslice) - * return mslice # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_slice_copy') - */ - __pyx_r = __pyx_v_mslice; - goto __pyx_L0; - } - - /* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_obj); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { - int __pyx_v_dim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - Py_ssize_t *__pyx_v_suboffsets; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - Py_ssize_t __pyx_t_5; - __Pyx_RefNannySetupContext("slice_copy", 0); - - /* "View.MemoryView":1067 - * cdef (Py_ssize_t*) shape, strides, suboffsets - * - * shape = memview.view.shape # <<<<<<<<<<<<<< - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets - */ - __pyx_t_1 = __pyx_v_memview->view.shape; - __pyx_v_shape = __pyx_t_1; - - /* "View.MemoryView":1068 - * - * shape = memview.view.shape - * strides = memview.view.strides # <<<<<<<<<<<<<< - * suboffsets = memview.view.suboffsets - * - */ - __pyx_t_1 = __pyx_v_memview->view.strides; - __pyx_v_strides = __pyx_t_1; - - /* "View.MemoryView":1069 - * shape = memview.view.shape - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< - * - * dst.memview = <__pyx_memoryview *> memview - */ - __pyx_t_1 = __pyx_v_memview->view.suboffsets; - __pyx_v_suboffsets = __pyx_t_1; - - /* "View.MemoryView":1071 - * suboffsets = memview.view.suboffsets - * - * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< - * dst.data = memview.view.buf - * - */ - __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); - - /* "View.MemoryView":1072 - * - * dst.memview = <__pyx_memoryview *> memview - * dst.data = memview.view.buf # <<<<<<<<<<<<<< - * - * for dim in range(memview.view.ndim): - */ - __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); - - /* "View.MemoryView":1074 - * dst.data = memview.view.buf - * - * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - */ - __pyx_t_2 = __pyx_v_memview->view.ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_dim = __pyx_t_4; - - /* "View.MemoryView":1075 - * - * for dim in range(memview.view.ndim): - * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - */ - (__pyx_v_dst->shape[__pyx_v_dim]) = 
(__pyx_v_shape[__pyx_v_dim]); - - /* "View.MemoryView":1076 - * for dim in range(memview.view.ndim): - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - * - */ - (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); - - /* "View.MemoryView":1077 - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_object') - */ - if ((__pyx_v_suboffsets != 0)) { - __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); - } else { - __pyx_t_5 = -1L; - } - (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; - } - - /* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":1080 - * - * @cname('__pyx_memoryview_copy_object') - * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - */ - -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { - __Pyx_memviewslice __pyx_v_memviewslice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_copy", 0); - - /* "View.MemoryView":1083 - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< - * return memoryview_copy_from_slice(memview, &memviewslice) - * - */ - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); - - /* "View.MemoryView":1084 - * cdef __Pyx_memviewslice memviewslice - * slice_copy(memview, &memviewslice) - * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_object_from_slice') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":1080 - * - * @cname('__pyx_memoryview_copy_object') - * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< - * "Create a new memoryview object" - * cdef __Pyx_memviewslice memviewslice - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1087 - * - * @cname('__pyx_memoryview_copy_object_from_slice') - * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< - * """ - * Create a new memoryview object from a given memoryview object and slice. 
- */ - -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { - PyObject *(*__pyx_v_to_object_func)(char *); - int (*__pyx_v_to_dtype_func)(char *, PyObject *); - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *(*__pyx_t_3)(char *); - int (*__pyx_t_4)(char *, PyObject *); - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); - - /* "View.MemoryView":1094 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1095 - * - * if isinstance(memview, _memoryviewslice): - * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - * else: - */ - __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; - __pyx_v_to_object_func = __pyx_t_3; - - /* "View.MemoryView":1096 - * if isinstance(memview, _memoryviewslice): - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< - * else: - * to_object_func = NULL - */ - __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; - __pyx_v_to_dtype_func = __pyx_t_4; - - /* "View.MemoryView":1094 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * to_object_func = (<_memoryviewslice> memview).to_object_func - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1098 - * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func - * else: - * to_object_func = NULL # <<<<<<<<<<<<<< - * to_dtype_func = NULL - * - */ - /*else*/ { - __pyx_v_to_object_func = NULL; - - /* "View.MemoryView":1099 - * else: - * to_object_func = NULL - * to_dtype_func = NULL # <<<<<<<<<<<<<< - * - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, - */ - __pyx_v_to_dtype_func = NULL; - } - __pyx_L3:; - - /* "View.MemoryView":1101 - * to_dtype_func = NULL - * - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< - * to_object_func, to_dtype_func, - * memview.dtype_is_object) - */ - __Pyx_XDECREF(__pyx_r); - - /* "View.MemoryView":1103 - * return memoryview_fromslice(memviewslice[0], memview.view.ndim, - * to_object_func, to_dtype_func, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "View.MemoryView":1087 - * - * @cname('__pyx_memoryview_copy_object_from_slice') - * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< - * """ - * Create a new memoryview object from a given memoryview 
object and slice. - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< - * if arg < 0: - * return -arg - */ - -static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { - Py_ssize_t __pyx_r; - int __pyx_t_1; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: # <<<<<<<<<<<<<< - * return -arg - * else: - */ - __pyx_t_1 = ((__pyx_v_arg < 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1111 - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: - * return -arg # <<<<<<<<<<<<<< - * else: - * return arg - */ - __pyx_r = (-__pyx_v_arg); - goto __pyx_L0; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: # <<<<<<<<<<<<<< - * return -arg - * else: - */ - } - - /* "View.MemoryView":1113 - * return -arg - * else: - * return arg # <<<<<<<<<<<<<< - * - * @cname('__pyx_get_best_slice_order') - */ - /*else*/ { - __pyx_r = __pyx_v_arg; - goto __pyx_L0; - } - - /* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< - * if arg < 0: - * return -arg - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1116 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. 
- */ - -static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { - int __pyx_v_i; - Py_ssize_t __pyx_v_c_stride; - Py_ssize_t __pyx_v_f_stride; - char __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1121 - * """ - * cdef int i - * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< - * cdef Py_ssize_t f_stride = 0 - * - */ - __pyx_v_c_stride = 0; - - /* "View.MemoryView":1122 - * cdef int i - * cdef Py_ssize_t c_stride = 0 - * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_f_stride = 0; - - /* "View.MemoryView":1124 - * cdef Py_ssize_t f_stride = 0 - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1125 - * - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * c_stride = mslice.strides[i] - * break - */ - __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1126 - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1127 - * if mslice.shape[i] > 1: - * c_stride = mslice.strides[i] - * break # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - goto __pyx_L4_break; - - /* "View.MemoryView":1125 - * - * for i in range(ndim - 1, -1, -1): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * c_stride = mslice.strides[i] - * break - */ - } - } - __pyx_L4_break:; - - /* "View.MemoryView":1129 - * break - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] - */ - __pyx_t_1 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_1; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1130 - * - * for i in range(ndim): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * f_stride = mslice.strides[i] - * break - */ - __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1131 - * for i in range(ndim): - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< - * break - * - */ - __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1132 - * if mslice.shape[i] > 1: - * f_stride = mslice.strides[i] - * break # <<<<<<<<<<<<<< - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): - */ - goto __pyx_L7_break; - - /* "View.MemoryView":1130 - * - * for i in range(ndim): - * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< - * f_stride = mslice.strides[i] - * break - */ - } - } - __pyx_L7_break:; - - /* "View.MemoryView":1134 - * break - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< - * return 'C' - * else: - */ - __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1135 - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): - * return 'C' # <<<<<<<<<<<<<< - * else: - * return 'F' - */ - __pyx_r = 'C'; - goto __pyx_L0; - - /* "View.MemoryView":1134 - * break - * - * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< - * return 'C' - * else: - */ - } - - /* "View.MemoryView":1137 - * return 'C' - * else: - 
* return 'F' # <<<<<<<<<<<<<< - * - * @cython.cdivision(True) - */ - /*else*/ { - __pyx_r = 'F'; - goto __pyx_L0; - } - - /* "View.MemoryView":1116 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1140 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - -static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; - Py_ssize_t __pyx_v_dst_extent; - Py_ssize_t __pyx_v_src_stride; - Py_ssize_t __pyx_v_dst_stride; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - Py_ssize_t __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - - /* "View.MemoryView":1147 - * - * cdef Py_ssize_t i - * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] - */ - __pyx_v_src_extent = (__pyx_v_src_shape[0]); - - /* "View.MemoryView":1148 - * cdef Py_ssize_t i - * cdef Py_ssize_t src_extent = src_shape[0] - * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t src_stride = src_strides[0] - * cdef Py_ssize_t dst_stride = dst_strides[0] - */ - __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); - - /* "View.MemoryView":1149 - * cdef Py_ssize_t src_extent = src_shape[0] - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - */ - __pyx_v_src_stride = (__pyx_v_src_strides[0]); - - /* "View.MemoryView":1150 - * cdef Py_ssize_t dst_extent = dst_shape[0] - * cdef Py_ssize_t src_stride = src_strides[0] - * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); - - /* "View.MemoryView":1152 - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - */ - __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L5_bool_binop_done; - } - __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L5_bool_binop_done; - } - - /* "View.MemoryView":1154 - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< - * memcpy(dst_data, src_data, itemsize * dst_extent) - * else: - */ - __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); - if (__pyx_t_2) { - __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); - } - __pyx_t_3 = (__pyx_t_2 != 
0); - __pyx_t_1 = __pyx_t_3; - __pyx_L5_bool_binop_done:; - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - if (__pyx_t_1) { - - /* "View.MemoryView":1155 - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< - * else: - * for i in range(dst_extent): - */ - (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); - - /* "View.MemoryView":1153 - * - * if ndim == 1: - * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< - * src_stride == itemsize == dst_stride): - * memcpy(dst_data, src_data, itemsize * dst_extent) - */ - goto __pyx_L4; - } - - /* "View.MemoryView":1157 - * memcpy(dst_data, src_data, itemsize * dst_extent) - * else: - * for i in range(dst_extent): # <<<<<<<<<<<<<< - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride - */ - /*else*/ { - __pyx_t_4 = __pyx_v_dst_extent; - __pyx_t_5 = __pyx_t_4; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1158 - * else: - * for i in range(dst_extent): - * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< - * src_data += src_stride - * dst_data += dst_stride - */ - (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); - - /* "View.MemoryView":1159 - * for i in range(dst_extent): - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride # <<<<<<<<<<<<<< - * dst_data += dst_stride - * else: - */ - __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); - - /* "View.MemoryView":1160 - * memcpy(dst_data, src_data, itemsize) - * src_data += src_stride - * dst_data += dst_stride # <<<<<<<<<<<<<< - * else: - * for i in range(dst_extent): - */ - __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); - } - } - __pyx_L4:; - - /* "View.MemoryView":1152 - * cdef Py_ssize_t dst_stride = dst_strides[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * if (src_stride > 0 and dst_stride > 0 and - * src_stride == itemsize == dst_stride): - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1162 - * dst_data += dst_stride - * else: - * for i in range(dst_extent): # <<<<<<<<<<<<<< - * _copy_strided_to_strided(src_data, src_strides + 1, - * dst_data, dst_strides + 1, - */ - /*else*/ { - __pyx_t_4 = __pyx_v_dst_extent; - __pyx_t_5 = __pyx_t_4; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1163 - * else: - * for i in range(dst_extent): - * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< - * dst_data, dst_strides + 1, - * src_shape + 1, dst_shape + 1, - */ - _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); - - /* "View.MemoryView":1167 - * src_shape + 1, dst_shape + 1, - * ndim - 1, itemsize) - * src_data += src_stride # <<<<<<<<<<<<<< - * dst_data += dst_stride - * - */ - __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); - - /* "View.MemoryView":1168 - * ndim - 1, itemsize) - * src_data += src_stride - * dst_data += dst_stride # <<<<<<<<<<<<<< - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, - */ - __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); - } - } - __pyx_L3:; - - 
/* "View.MemoryView":1140 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - - /* function exit code */ -} - -/* "View.MemoryView":1170 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - */ - -static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - - /* "View.MemoryView":1173 - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< - * src.shape, dst.shape, ndim, itemsize) - * - */ - _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1170 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1177 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_size; - Py_ssize_t __pyx_r; - Py_ssize_t __pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - - /* "View.MemoryView":1179 - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< - * - * for shape in src.shape[:ndim]: - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_size = __pyx_t_1; - - /* "View.MemoryView":1181 - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - * - * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< - * size *= shape - * - */ - __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); - for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_v_shape = (__pyx_t_2[0]); - - /* "View.MemoryView":1182 - * - * for shape in src.shape[:ndim]: - * size *= shape # <<<<<<<<<<<<<< - * - * return size - */ - __pyx_v_size = (__pyx_v_size * __pyx_v_shape); - } - - /* "View.MemoryView":1184 - * size *= shape - * - * return size # <<<<<<<<<<<<<< - * - * @cname('__pyx_fill_contig_strides_array') - */ - __pyx_r = __pyx_v_size; - goto __pyx_L0; - - /* "View.MemoryView":1177 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1187 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) nogil: - */ - -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { - int __pyx_v_idx; - Py_ssize_t __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1196 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - __pyx_t_1 = ((__pyx_v_order == 'F') != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1197 - * - * if order == 'F': - * for idx in range(ndim): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - __pyx_t_2 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_idx = __pyx_t_4; - - /* "View.MemoryView":1198 - * if order == 'F': - * for idx in range(ndim): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * else: - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1199 - * for idx in range(ndim): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * else: - * for idx in range(ndim - 1, -1, -1): - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - - /* "View.MemoryView":1196 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1201 - * stride *= shape[idx] - * else: - * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - /*else*/ { - for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { - __pyx_v_idx = __pyx_t_2; - - /* "View.MemoryView":1202 - * else: - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1203 - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * - * return stride - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - } - __pyx_L3:; - - /* "View.MemoryView":1205 - * stride *= shape[idx] - * - * return stride # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_data_to_temp') - */ - __pyx_r = __pyx_v_stride; - goto __pyx_L0; - - /* "View.MemoryView":1187 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) nogil: - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1208 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { - int __pyx_v_i; - void *__pyx_v_result; - size_t __pyx_v_itemsize; - size_t __pyx_v_size; - void *__pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - struct __pyx_memoryview_obj *__pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":1219 - * cdef void *result - * - * cdef 
size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef size_t size = slice_get_size(src, ndim) - * - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1220 - * - * cdef size_t itemsize = src.memview.view.itemsize - * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< - * - * result = malloc(size) - */ - __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); - - /* "View.MemoryView":1222 - * cdef size_t size = slice_get_size(src, ndim) - * - * result = malloc(size) # <<<<<<<<<<<<<< - * if not result: - * _err(MemoryError, NULL) - */ - __pyx_v_result = malloc(__pyx_v_size); - - /* "View.MemoryView":1223 - * - * result = malloc(size) - * if not result: # <<<<<<<<<<<<<< - * _err(MemoryError, NULL) - * - */ - __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1224 - * result = malloc(size) - * if not result: - * _err(MemoryError, NULL) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) - - /* "View.MemoryView":1223 - * - * result = malloc(size) - * if not result: # <<<<<<<<<<<<<< - * _err(MemoryError, NULL) - * - */ - } - - /* "View.MemoryView":1227 - * - * - * tmpslice.data = result # <<<<<<<<<<<<<< - * tmpslice.memview = src.memview - * for i in range(ndim): - */ - __pyx_v_tmpslice->data = ((char *)__pyx_v_result); - - /* "View.MemoryView":1228 - * - * tmpslice.data = result - * tmpslice.memview = src.memview # <<<<<<<<<<<<<< - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] - */ - __pyx_t_4 = __pyx_v_src->memview; - __pyx_v_tmpslice->memview = __pyx_t_4; - - /* "View.MemoryView":1229 - * tmpslice.data = result - * tmpslice.memview = src.memview - * for i in range(ndim): # <<<<<<<<<<<<<< - * tmpslice.shape[i] = src.shape[i] - * tmpslice.suboffsets[i] = -1 - */ - __pyx_t_3 = __pyx_v_ndim; - __pyx_t_5 = __pyx_t_3; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1230 - * tmpslice.memview = src.memview - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< - * tmpslice.suboffsets[i] = -1 - * - */ - (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); - - /* "View.MemoryView":1231 - * for i in range(ndim): - * tmpslice.shape[i] = src.shape[i] - * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< - * - * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, - */ - (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; - } - - /* "View.MemoryView":1233 - * tmpslice.suboffsets[i] = -1 - * - * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< - * ndim, order) - * - */ - (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); - - /* "View.MemoryView":1237 - * - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if tmpslice.shape[i] == 1: - * tmpslice.strides[i] = 0 - */ - __pyx_t_3 = __pyx_v_ndim; - __pyx_t_5 = __pyx_t_3; - for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { - __pyx_v_i = __pyx_t_6; - - /* "View.MemoryView":1238 - * - * for i in range(ndim): - * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< - * tmpslice.strides[i] = 0 - * - */ - __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1239 - * for i 
in range(ndim): - * if tmpslice.shape[i] == 1: - * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< - * - * if slice_is_contig(src[0], order, ndim): - */ - (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1238 - * - * for i in range(ndim): - * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< - * tmpslice.strides[i] = 0 - * - */ - } - } - - /* "View.MemoryView":1241 - * tmpslice.strides[i] = 0 - * - * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< - * memcpy(result, src.data, size) - * else: - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1242 - * - * if slice_is_contig(src[0], order, ndim): - * memcpy(result, src.data, size) # <<<<<<<<<<<<<< - * else: - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) - */ - (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); - - /* "View.MemoryView":1241 - * tmpslice.strides[i] = 0 - * - * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< - * memcpy(result, src.data, size) - * else: - */ - goto __pyx_L9; - } - - /* "View.MemoryView":1244 - * memcpy(result, src.data, size) - * else: - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< - * - * return result - */ - /*else*/ { - copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); - } - __pyx_L9:; - - /* "View.MemoryView":1246 - * copy_strided_to_strided(src, tmpslice, ndim, itemsize) - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":1208 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = NULL; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1251 - * - * @cname('__pyx_memoryview_err_extents') - * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - */ - -static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_extents", 0); - - /* "View.MemoryView":1254 - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - * (i, extent1, extent2)) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err_dim') - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_3 = 0; - - /* "View.MemoryView":1253 - * cdef int _err_extents(int i, Py_ssize_t extent1, - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< - * (i, extent1, extent2)) - * - */ - __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_4, 0, 0, 0); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __PYX_ERR(1, 1253, __pyx_L1_error) - - /* "View.MemoryView":1251 - * - * @cname('__pyx_memoryview_err_extents') - * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< - * Py_ssize_t extent2) except -1 with gil: - * raise ValueError("got differing extents in dimension %d (got %d and %d)" % - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1257 - * - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii') % dim) - * - */ - -static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err_dim", 0); - __Pyx_INCREF(__pyx_v_error); - - /* "View.MemoryView":1258 - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: - * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_err') - */ - __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_INCREF(__pyx_v_error); - __pyx_t_3 
= __pyx_v_error; __pyx_t_2 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - } - } - __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 1258, __pyx_L1_error) - - /* "View.MemoryView":1257 - * - * @cname('__pyx_memoryview_err_dim') - * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii') % dim) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_XDECREF(__pyx_v_error); - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1261 - * - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< - * if msg != NULL: - * raise error(msg.decode('ascii')) - */ - -static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("_err", 0); - __Pyx_INCREF(__pyx_v_error); - - /* "View.MemoryView":1262 - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii')) - * else: - */ - __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":1263 - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: - * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< - * else: - * raise error - */ - __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_error); - __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - } - } - __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(1, 1263, __pyx_L1_error) - - /* "View.MemoryView":1262 - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: - * if msg != NULL: # <<<<<<<<<<<<<< - * raise error(msg.decode('ascii')) - * else: - */ - } - - /* "View.MemoryView":1265 - * raise error(msg.decode('ascii')) - * else: - * raise error # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_contents') - */ - /*else*/ { - __Pyx_Raise(__pyx_v_error, 0, 0, 0); - __PYX_ERR(1, 1265, __pyx_L1_error) - } - - /* "View.MemoryView":1261 - * - * @cname('__pyx_memoryview_err') - * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< - * if msg != NULL: - * raise error(msg.decode('ascii')) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __Pyx_XDECREF(__pyx_v_error); - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - return __pyx_r; -} - -/* "View.MemoryView":1268 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { - void *__pyx_v_tmpdata; - size_t __pyx_v_itemsize; - int __pyx_v_i; - char __pyx_v_order; - int __pyx_v_broadcasting; - int __pyx_v_direct_copy; - __Pyx_memviewslice __pyx_v_tmp; - int __pyx_v_ndim; - int __pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - void *__pyx_t_7; - int __pyx_t_8; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":1276 - * Check for overlapping memory and verify the shapes. 
- * """ - * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - */ - __pyx_v_tmpdata = NULL; - - /* "View.MemoryView":1277 - * """ - * cdef void *tmpdata = NULL - * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - */ - __pyx_t_1 = __pyx_v_src.memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1279 - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< - * cdef bint broadcasting = False - * cdef bint direct_copy = False - */ - __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); - - /* "View.MemoryView":1280 - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False # <<<<<<<<<<<<<< - * cdef bint direct_copy = False - * cdef __Pyx_memviewslice tmp - */ - __pyx_v_broadcasting = 0; - - /* "View.MemoryView":1281 - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False - * cdef bint direct_copy = False # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice tmp - * - */ - __pyx_v_direct_copy = 0; - - /* "View.MemoryView":1284 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1285 - * - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); - - /* "View.MemoryView":1284 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1286 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1287 - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< - * - * cdef int ndim = max(src_ndim, dst_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); - - /* "View.MemoryView":1286 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - } - __pyx_L3:; - - /* "View.MemoryView":1289 - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - __pyx_t_3 = __pyx_v_dst_ndim; - __pyx_t_4 = __pyx_v_src_ndim; - if (((__pyx_t_3 > __pyx_t_4) != 0)) { - __pyx_t_5 = __pyx_t_3; - } else { - __pyx_t_5 = __pyx_t_4; - } - __pyx_v_ndim = __pyx_t_5; - - /* "View.MemoryView":1291 - * cdef int ndim = max(src_ndim, dst_ndim) - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - */ - __pyx_t_5 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_5; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - 
__pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1292 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1293 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1294 - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - * broadcasting = True # <<<<<<<<<<<<<< - * src.strides[i] = 0 - * else: - */ - __pyx_v_broadcasting = 1; - - /* "View.MemoryView":1295 - * if src.shape[i] == 1: - * broadcasting = True - * src.strides[i] = 0 # <<<<<<<<<<<<<< - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) - */ - (__pyx_v_src.strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1293 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - goto __pyx_L7; - } - - /* "View.MemoryView":1297 - * src.strides[i] = 0 - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< - * - * if src.suboffsets[i] >= 0: - */ - /*else*/ { - __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":1292 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - } - - /* "View.MemoryView":1299 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - */ - __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1300 - * - * if src.suboffsets[i] >= 0: - * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< - * - * if slices_overlap(&src, &dst, ndim, itemsize): - */ - __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) - - /* "View.MemoryView":1299 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - */ - } - } - - /* "View.MemoryView":1302 - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1304 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1305 - * - * if not slice_is_contig(src, order, ndim): - * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - */ - __pyx_v_order = 
__pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); - - /* "View.MemoryView":1304 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - } - - /* "View.MemoryView":1307 - * order = get_best_order(&dst, ndim) - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< - * src = tmp - * - */ - __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) - __pyx_v_tmpdata = __pyx_t_7; - - /* "View.MemoryView":1308 - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - * src = tmp # <<<<<<<<<<<<<< - * - * if not broadcasting: - */ - __pyx_v_src = __pyx_v_tmp; - - /* "View.MemoryView":1302 - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - } - - /* "View.MemoryView":1310 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1313 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1314 - * - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); - - /* "View.MemoryView":1313 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - goto __pyx_L12; - } - - /* "View.MemoryView":1315 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1316 - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< - * - * if direct_copy: - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); - - /* "View.MemoryView":1315 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - } - __pyx_L12:; - - /* "View.MemoryView":1318 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - __pyx_t_2 = (__pyx_v_direct_copy != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1320 - * if direct_copy: - * - * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - 
/* "View.MemoryView":1321 - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) - */ - (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); - - /* "View.MemoryView":1322 - * refcount_copying(&dst, dtype_is_object, ndim, False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * free(tmpdata) - * return 0 - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1323 - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1324 - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * if order == 'F' == get_best_order(&dst, ndim): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1318 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - } - - /* "View.MemoryView":1310 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1326 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = (__pyx_v_order == 'F'); - if (__pyx_t_2) { - __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); - } - __pyx_t_8 = (__pyx_t_2 != 0); - if (__pyx_t_8) { - - /* "View.MemoryView":1329 - * - * - * transpose_memslice(&src) # <<<<<<<<<<<<<< - * transpose_memslice(&dst) - * - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) - - /* "View.MemoryView":1330 - * - * transpose_memslice(&src) - * transpose_memslice(&dst) # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) - - /* "View.MemoryView":1326 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1332 - * transpose_memslice(&dst) - * - * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1333 - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, True) - * - */ - copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1334 - * refcount_copying(&dst, dtype_is_object, ndim, False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * - * free(tmpdata) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1336 - * refcount_copying(&dst, dtype_is_object, ndim, 
True) - * - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1337 - * - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_broadcast_leading') - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1268 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = -1; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1340 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) nogil: - */ - -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { - int __pyx_v_i; - int __pyx_v_offset; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - - /* "View.MemoryView":1344 - * int ndim_other) nogil: - * cdef int i - * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); - - /* "View.MemoryView":1346 - * cdef int offset = ndim_other - ndim - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1347 - * - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - */ - (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); - - /* "View.MemoryView":1348 - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - */ - (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1349 - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< - * - * for i in range(offset): - */ - (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); - } - - /* "View.MemoryView":1351 - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - * for i in range(offset): # <<<<<<<<<<<<<< - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - */ - __pyx_t_1 = __pyx_v_offset; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1352 - * - * for i in range(offset): - * mslice.shape[i] = 1 # <<<<<<<<<<<<<< - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 - */ - (__pyx_v_mslice->shape[__pyx_v_i]) = 1; - - /* "View.MemoryView":1353 - * for i in range(offset): - * mslice.shape[i] = 1 
- * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< - * mslice.suboffsets[i] = -1 - * - */ - (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); - - /* "View.MemoryView":1354 - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< - * - * - */ - (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; - } - - /* "View.MemoryView":1340 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1362 - * - * @cname('__pyx_memoryview_refcount_copying') - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< - * int ndim, bint inc) nogil: - * - */ - -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { - int __pyx_t_1; - - /* "View.MemoryView":1366 - * - * - * if dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, - * dst.strides, ndim, inc) - */ - __pyx_t_1 = (__pyx_v_dtype_is_object != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1367 - * - * if dtype_is_object: - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< - * dst.strides, ndim, inc) - * - */ - __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); - - /* "View.MemoryView":1366 - * - * - * if dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice_with_gil(dst.data, dst.shape, - * dst.strides, ndim, inc) - */ - } - - /* "View.MemoryView":1362 - * - * @cname('__pyx_memoryview_refcount_copying') - * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< - * int ndim, bint inc) nogil: - * - */ - - /* function exit code */ -} - -/* "View.MemoryView":1371 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - */ - -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { - __Pyx_RefNannyDeclarations - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); - - /* "View.MemoryView":1374 - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); - - /* "View.MemoryView":1371 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') - * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * bint inc) with gil: - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif -} - -/* "View.MemoryView":1377 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * 
Py_ssize_t *strides, int ndim, bint inc): - * cdef Py_ssize_t i - */ - -static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); - - /* "View.MemoryView":1381 - * cdef Py_ssize_t i - * - * for i in range(shape[0]): # <<<<<<<<<<<<<< - * if ndim == 1: - * if inc: - */ - __pyx_t_1 = (__pyx_v_shape[0]); - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1382 - * - * for i in range(shape[0]): - * if ndim == 1: # <<<<<<<<<<<<<< - * if inc: - * Py_INCREF(( data)[0]) - */ - __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":1383 - * for i in range(shape[0]): - * if ndim == 1: - * if inc: # <<<<<<<<<<<<<< - * Py_INCREF(( data)[0]) - * else: - */ - __pyx_t_4 = (__pyx_v_inc != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":1384 - * if ndim == 1: - * if inc: - * Py_INCREF(( data)[0]) # <<<<<<<<<<<<<< - * else: - * Py_DECREF(( data)[0]) - */ - Py_INCREF((((PyObject **)__pyx_v_data)[0])); - - /* "View.MemoryView":1383 - * for i in range(shape[0]): - * if ndim == 1: - * if inc: # <<<<<<<<<<<<<< - * Py_INCREF(( data)[0]) - * else: - */ - goto __pyx_L6; - } - - /* "View.MemoryView":1386 - * Py_INCREF(( data)[0]) - * else: - * Py_DECREF(( data)[0]) # <<<<<<<<<<<<<< - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, - */ - /*else*/ { - Py_DECREF((((PyObject **)__pyx_v_data)[0])); - } - __pyx_L6:; - - /* "View.MemoryView":1382 - * - * for i in range(shape[0]): - * if ndim == 1: # <<<<<<<<<<<<<< - * if inc: - * Py_INCREF(( data)[0]) - */ - goto __pyx_L5; - } - - /* "View.MemoryView":1388 - * Py_DECREF(( data)[0]) - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< - * ndim - 1, inc) - * - */ - /*else*/ { - - /* "View.MemoryView":1389 - * else: - * refcount_objects_in_slice(data, shape + 1, strides + 1, - * ndim - 1, inc) # <<<<<<<<<<<<<< - * - * data += strides[0] - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); - } - __pyx_L5:; - - /* "View.MemoryView":1391 - * ndim - 1, inc) - * - * data += strides[0] # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); - } - - /* "View.MemoryView":1377 - * - * @cname('__pyx_memoryview_refcount_objects_in_slice') - * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, bint inc): - * cdef Py_ssize_t i - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":1397 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - */ - -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { - - /* "View.MemoryView":1400 - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * _slice_assign_scalar(dst.data, dst.shape, 
dst.strides, ndim, - * itemsize, item) - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1401 - * bint dtype_is_object) nogil: - * refcount_copying(dst, dtype_is_object, ndim, False) - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< - * itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1403 - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, - * itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * - * - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1397 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1407 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) nogil: - */ - -static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_extent; - int __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - Py_ssize_t __pyx_t_4; - - /* "View.MemoryView":1411 - * size_t itemsize, void *item) nogil: - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t extent = shape[0] - * - */ - __pyx_v_stride = (__pyx_v_strides[0]); - - /* "View.MemoryView":1412 - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] - * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_extent = (__pyx_v_shape[0]); - - /* "View.MemoryView":1414 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1415 - * - * if ndim == 1: - * for i in range(extent): # <<<<<<<<<<<<<< - * memcpy(data, item, itemsize) - * data += stride - */ - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1416 - * if ndim == 1: - * for i in range(extent): - * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< - * data += stride - * else: - */ - (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); - - /* "View.MemoryView":1417 - * for i in range(extent): - * memcpy(data, item, itemsize) - * data += stride # <<<<<<<<<<<<<< - * else: - * for i in range(extent): - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - - /* "View.MemoryView":1414 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1419 - * data += stride - * else: - * for i in range(extent): # <<<<<<<<<<<<<< - * _slice_assign_scalar(data, shape + 1, strides + 1, - * ndim - 1, 
itemsize, item) - */ - /*else*/ { - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1420 - * else: - * for i in range(extent): - * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< - * ndim - 1, itemsize, item) - * data += stride - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1422 - * _slice_assign_scalar(data, shape + 1, strides + 1, - * ndim - 1, itemsize, item) - * data += stride # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - } - __pyx_L3:; - - /* "View.MemoryView":1407 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) nogil: - */ - - /* function exit code */ -} - -/* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v___pyx_type = 0; - long __pyx_v___pyx_checksum; - PyObject *__pyx_v___pyx_state = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) - } - } else if 
(PyTuple_GET_SIZE(__pyx_args) != 3) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - } - __pyx_v___pyx_type = values[0]; - __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) - __pyx_v___pyx_state = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_v___pyx_PickleError = 0; - PyObject *__pyx_v___pyx_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - */ - __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); - if (__pyx_t_1) { - - /* "(tree fragment)":5 - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: - * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_n_s_PickleError); - __Pyx_GIVEREF(__pyx_n_s_PickleError); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); - __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_t_2); - __pyx_v___pyx_PickleError = __pyx_t_2; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":6 - * if __pyx_checksum != 0xb068931: - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - */ - __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_INCREF(__pyx_v___pyx_PickleError); - __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 6, __pyx_L1_error) - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - */ - } - - /* "(tree fragment)":7 - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v___pyx_result = __pyx_t_3; - __pyx_t_3 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - */ - __pyx_t_1 = (__pyx_v___pyx_state != Py_None); - __pyx_t_6 = (__pyx_t_1 != 0); - if (__pyx_t_6) { - - /* "(tree fragment)":9 - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) - __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - */ - } - - /* "(tree fragment)":10 - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result # <<<<<<<<<<<<<< - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v___pyx_result); - __pyx_r = __pyx_v___pyx_result; - goto __pyx_L0; - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v___pyx_PickleError); - __Pyx_XDECREF(__pyx_v___pyx_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - PyObject 
*__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); - - /* "(tree fragment)":12 - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(1, 12, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_GOTREF(__pyx_v___pyx_result->name); - __Pyx_DECREF(__pyx_v___pyx_result->name); - __pyx_v___pyx_result->name = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 13, __pyx_L1_error) - } - __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) - __pyx_t_4 = ((__pyx_t_3 > 1) != 0); - if (__pyx_t_4) { - } else { - __pyx_t_2 = __pyx_t_4; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) - __pyx_t_5 = (__pyx_t_4 != 0); - __pyx_t_2 = __pyx_t_5; - __pyx_L4_bool_binop_done:; - if (__pyx_t_2) { - - /* "(tree fragment)":14 - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< - */ - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(1, 14, __pyx_L1_error) - } - __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - } - } - __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - } - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static struct __pyx_vtabstruct_array __pyx_vtable_array; - -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_array_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct __pyx_array_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_array; - p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); - p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); - if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_array(PyObject *o) { - struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_array___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->mode); - Py_CLEAR(p->_format); - (*Py_TYPE(o)->tp_free)(o); -} -static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_array___setitem__(o, i, v); - } - else { - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); - return -1; - } -} - -static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { - PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); - if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - v = __pyx_array___getattr__(o, n); - } - return v; -} - 
-static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); -} - -static PyMethodDef __pyx_methods_array[] = { - {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_array[] = { - {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PySequenceMethods __pyx_tp_as_sequence_array = { - __pyx_array___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_array, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_array = { - __pyx_array___len__, /*mp_length*/ - __pyx_array___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_array = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_array_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_array = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.array", /*tp_name*/ - sizeof(struct __pyx_array_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_array, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - __pyx_tp_getattro_array, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ - 0, /*tp_doc*/ - 0, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_array, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_array, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_array, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; - -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - struct __pyx_MemviewEnum_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o 
= (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct __pyx_MemviewEnum_obj *)o); - p->name = Py_None; Py_INCREF(Py_None); - return o; -} - -static void __pyx_tp_dealloc_Enum(PyObject *o) { - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - Py_CLEAR(p->name); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - if (p->name) { - e = (*v)(p->name, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_Enum(PyObject *o) { - PyObject* tmp; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - tmp = ((PyObject*)p->name); - p->name = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyMethodDef __pyx_methods_Enum[] = { - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static PyTypeObject __pyx_type___pyx_MemviewEnum = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.Enum", /*tp_name*/ - sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_Enum, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_MemviewEnum___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_Enum, /*tp_traverse*/ - __pyx_tp_clear_Enum, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_Enum, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - __pyx_MemviewEnum___init__, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_Enum, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; -static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; - -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryview_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct 
__pyx_memoryview_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_memoryview; - p->obj = Py_None; Py_INCREF(Py_None); - p->_size = Py_None; Py_INCREF(Py_None); - p->_array_interface = Py_None; Py_INCREF(Py_None); - p->view.obj = NULL; - if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_memoryview(PyObject *o) { - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryview___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->obj); - Py_CLEAR(p->_size); - Py_CLEAR(p->_array_interface); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - if (p->obj) { - e = (*v)(p->obj, a); if (e) return e; - } - if (p->_size) { - e = (*v)(p->_size, a); if (e) return e; - } - if (p->_array_interface) { - e = (*v)(p->_array_interface, a); if (e) return e; - } - if (p->view.obj) { - e = (*v)(p->view.obj, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_memoryview(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - tmp = ((PyObject*)p->obj); - p->obj = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_size); - p->_size = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_array_interface); - p->_array_interface = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - Py_CLEAR(p->view.obj); - return 0; -} -static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_memoryview___setitem__(o, i, v); - } - else { - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); - return -1; - } -} - -static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); -} - -static 
PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); -} - -static PyMethodDef __pyx_methods_memoryview[] = { - {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, - {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, - {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, - {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_memoryview[] = { - {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, - {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, - {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, - {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, - {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, - {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, - {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, - {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, - {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PySequenceMethods __pyx_tp_as_sequence_memoryview = { - __pyx_memoryview___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_memoryview, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_memoryview = { - __pyx_memoryview___len__, /*mp_length*/ - __pyx_memoryview___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_memoryview = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_memoryview_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_memoryview = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.memoryview", /*tp_name*/ - sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_memoryview___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - __pyx_memoryview___str__, 
/*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_memoryview, /*tp_traverse*/ - __pyx_tp_clear_memoryview, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_memoryview, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_memoryview, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_memoryview, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; -static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; - -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryviewslice_obj *p; - PyObject *o = __pyx_tp_new_memoryview(t, a, k); - if (unlikely(!o)) return 0; - p = ((struct __pyx_memoryviewslice_obj *)o); - p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; - p->from_object = Py_None; Py_INCREF(Py_None); - p->from_slice.memview = NULL; - return o; -} - -static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryviewslice___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->from_object); - PyObject_GC_Track(o); - __pyx_tp_dealloc_memoryview(o); -} - -static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; - if (p->from_object) { - e = (*v)(p->from_object, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear__memoryviewslice(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - __pyx_tp_clear_memoryview(o); - tmp = ((PyObject*)p->from_object); - p->from_object = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - __PYX_XDEC_MEMVIEW(&p->from_slice, 1); - return 0; -} - -static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); -} - -static PyMethodDef __pyx_methods__memoryviewslice[] = { - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef 
__pyx_getsets__memoryviewslice[] = { - {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PyTypeObject __pyx_type___pyx_memoryviewslice = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core._memoryviewslice", /*tp_name*/ - sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - #if CYTHON_COMPILING_IN_PYPY - __pyx_memoryview___repr__, /*tp_repr*/ - #else - 0, /*tp_repr*/ - #endif - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - #if CYTHON_COMPILING_IN_PYPY - __pyx_memoryview___str__, /*tp_str*/ - #else - 0, /*tp_str*/ - #endif - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - "Internal class for passing memoryview slices to Python", /*tp_doc*/ - __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ - __pyx_tp_clear__memoryviewslice, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods__memoryviewslice, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets__memoryviewslice, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new__memoryviewslice, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; - -static PyMethodDef __pyx_methods[] = { - {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_core}, - {0, NULL} -}; -#endif - -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - "core", - 0, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_n_s_ASCII, __pyx_k_ASCII, 
sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, - {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, - {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, - {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, - {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, - {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, - {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, - {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, - {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, - {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, - {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, - {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, - {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, - {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, - {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, - {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, - {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, - {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, - {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, - {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, - {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, - {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, - {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, - {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 
1, 1}, - {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, - {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, - {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, - {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, - {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, - {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, - {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, - {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, - {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, - {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, - {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, - {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, - {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, - {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, - {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, - {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, - {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, - {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, - {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, - {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, - {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, - {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, - {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, - {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 
1}, - {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, - {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, - {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, - {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, - {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, - {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, - {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, - {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, - {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, - {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, - {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) - __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) - __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) - __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) - __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) - __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) - __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) - __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "View.MemoryView":133 - * - * if not self.ndim: - * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< - * - * if itemsize <= 0: - */ - __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__2); - __Pyx_GIVEREF(__pyx_tuple__2); - - /* "View.MemoryView":136 - * - * if itemsize <= 0: - * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_tuple__3); - __Pyx_GIVEREF(__pyx_tuple__3); - - /* "View.MemoryView":148 - * - * if not self._shape: - * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__4); - __Pyx_GIVEREF(__pyx_tuple__4); - - /* "View.MemoryView":176 - * self.data = malloc(self.len) - * if not self.data: - * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__5); - __Pyx_GIVEREF(__pyx_tuple__5); - - /* "View.MemoryView":192 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__6); - __Pyx_GIVEREF(__pyx_tuple__6); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__7); - __Pyx_GIVEREF(__pyx_tuple__7); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__8); - __Pyx_GIVEREF(__pyx_tuple__8); - - /* "View.MemoryView":418 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__9); - __Pyx_GIVEREF(__pyx_tuple__9); - - /* "View.MemoryView":495 - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< - * else: - * if len(self.view.format) == 1: - */ - __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__10); - __Pyx_GIVEREF(__pyx_tuple__10); - - /* "View.MemoryView":520 - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< - * - * if flags & PyBUF_ND: - */ - __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if 
(unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__11); - __Pyx_GIVEREF(__pyx_tuple__11); - - /* "View.MemoryView":570 - * if self.view.strides == NULL: - * - * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - */ - __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__12); - __Pyx_GIVEREF(__pyx_tuple__12); - - /* "View.MemoryView":577 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - */ - __pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__13); - __Pyx_INCREF(__pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_tuple__13); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__14); - __Pyx_GIVEREF(__pyx_tuple__14); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__15); - __Pyx_GIVEREF(__pyx_tuple__15); - - /* "View.MemoryView":682 - * if item is Ellipsis: - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * else: - */ - __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__17); - __Pyx_GIVEREF(__pyx_tuple__17); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__18); - __Pyx_GIVEREF(__pyx_tuple__18); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # 
<<<<<<<<<<<<<< - */ - __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__19); - __Pyx_GIVEREF(__pyx_tuple__19); - - /* "View.MemoryView":286 - * return self.name - * - * cdef generic = Enum("") # <<<<<<<<<<<<<< - * cdef strided = Enum("") # default - * cdef indirect = Enum("") - */ - __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__20); - __Pyx_GIVEREF(__pyx_tuple__20); - - /* "View.MemoryView":287 - * - * cdef generic = Enum("") - * cdef strided = Enum("") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("") - * - */ - __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__21); - __Pyx_GIVEREF(__pyx_tuple__21); - - /* "View.MemoryView":288 - * cdef generic = Enum("") - * cdef strided = Enum("") # default - * cdef indirect = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__22); - __Pyx_GIVEREF(__pyx_tuple__22); - - /* "View.MemoryView":291 - * - * - * cdef contiguous = Enum("") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("") - * - */ - __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__23); - __Pyx_GIVEREF(__pyx_tuple__23); - - /* "View.MemoryView":292 - * - * cdef contiguous = Enum("") - * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__24); - __Pyx_GIVEREF(__pyx_tuple__24); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__25); - __Pyx_GIVEREF(__pyx_tuple__25); - __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - /* InitThreads.init */ - #ifdef WITH_THREAD -PyEval_InitThreads(); -#endif - -if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) - - if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_neg_1 = 
PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - generic = Py_None; Py_INCREF(Py_None); - strided = Py_None; Py_INCREF(Py_None); - indirect = Py_None; Py_INCREF(Py_None); - contiguous = Py_None; Py_INCREF(Py_None); - indirect_contiguous = Py_None; Py_INCREF(Py_None); - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __pyx_vtabptr_array = &__pyx_vtable_array; - __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; - if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_array.tp_print = 0; - #endif - if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - __pyx_array_type = &__pyx_type___pyx_array; - if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_MemviewEnum.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) - __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; - __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; - __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; - __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; - __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; - 
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; - __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; - __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; - __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; - if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_memoryview.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - __pyx_memoryview_type = &__pyx_type___pyx_memoryview; - __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; - __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; - __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; - __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; - __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; - if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_memoryviewslice.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#ifndef 
CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initcore(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_core(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? -1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { - result = PyDict_SetItemString(moddict, to_name, value); - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - PyObject *__pyx_t_1 = NULL; - static PyThread_type_lock __pyx_t_2[8]; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. 
Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? */ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - /*--- Initialize various global constants etc. 
---*/ - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_monotonic_align__core) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "monotonic_align.core")) { - if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - (void)__Pyx_modinit_type_import_code(); - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - __pyx_k_ = (-1e9); - - /* "monotonic_align/core.pyx":1 - * cimport cython # <<<<<<<<<<<<<< - * from cython.parallel import prange - * - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":209 - * info.obj = self - * - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * def __dealloc__(array self): - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_array_type); - - /* "View.MemoryView":286 - * return self.name - * - * cdef generic = Enum("") # <<<<<<<<<<<<<< - * cdef strided = Enum("") # default - * cdef indirect = Enum("") - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(generic); - __Pyx_DECREF_SET(generic, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":287 - * - * cdef generic = Enum("") - * cdef strided = Enum("") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("") - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(strided); - __Pyx_DECREF_SET(strided, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":288 - * cdef generic = Enum("") - * cdef strided = Enum("") # default - * cdef indirect = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(indirect); - __Pyx_DECREF_SET(indirect, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":291 - * - * - * cdef contiguous = Enum("") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("") - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(contiguous); - __Pyx_DECREF_SET(contiguous, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":292 - * - * cdef contiguous = Enum("") - * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(indirect_contiguous); - __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":316 - * - * DEF THREAD_LOCKS_PREALLOCATED = 8 - * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< - * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ - * PyThread_allocate_lock(), - */ - __pyx_memoryview_thread_locks_used = 0; - - /* "View.MemoryView":317 - * DEF THREAD_LOCKS_PREALLOCATED = 8 - * cdef int __pyx_memoryview_thread_locks_used = 0 - * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< - * PyThread_allocate_lock(), - * PyThread_allocate_lock(), - */ - __pyx_t_2[0] = PyThread_allocate_lock(); - __pyx_t_2[1] = PyThread_allocate_lock(); - __pyx_t_2[2] = PyThread_allocate_lock(); - __pyx_t_2[3] = PyThread_allocate_lock(); - __pyx_t_2[4] = PyThread_allocate_lock(); - __pyx_t_2[5] = PyThread_allocate_lock(); - __pyx_t_2[6] = PyThread_allocate_lock(); - __pyx_t_2[7] = PyThread_allocate_lock(); - memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); - - /* "View.MemoryView":549 - * info.obj = self - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_memoryview_type); - - /* "View.MemoryView":995 - * return self.from_object - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_memoryviewslice_type); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - if (__pyx_d) { - __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - Py_CLEAR(__pyx_m); - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); - if (unlikely(!result)) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* MemviewSliceInit */ -static int -__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference) -{ - __Pyx_RefNannyDeclarations - int i, retval=-1; - Py_buffer *buf = &memview->view; - __Pyx_RefNannySetupContext("init_memviewslice", 0); - if (unlikely(memviewslice->memview || memviewslice->data)) { - PyErr_SetString(PyExc_ValueError, - "memviewslice is already initialized!"); - goto fail; - } - if (buf->strides) { - for (i = 0; i < ndim; i++) { - memviewslice->strides[i] = buf->strides[i]; - } - } else { - Py_ssize_t stride = 
buf->itemsize; - for (i = ndim - 1; i >= 0; i--) { - memviewslice->strides[i] = stride; - stride *= buf->shape[i]; - } - } - for (i = 0; i < ndim; i++) { - memviewslice->shape[i] = buf->shape[i]; - if (buf->suboffsets) { - memviewslice->suboffsets[i] = buf->suboffsets[i]; - } else { - memviewslice->suboffsets[i] = -1; - } - } - memviewslice->memview = memview; - memviewslice->data = (char *)buf->buf; - if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { - Py_INCREF(memview); - } - retval = 0; - goto no_fail; -fail: - memviewslice->memview = 0; - memviewslice->data = 0; - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} -#ifndef Py_NO_RETURN -#define Py_NO_RETURN -#endif -static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { - va_list vargs; - char msg[200]; -#ifdef HAVE_STDARG_PROTOTYPES - va_start(vargs, fmt); -#else - va_start(vargs); -#endif - vsnprintf(msg, 200, fmt, vargs); - va_end(vargs); - Py_FatalError(msg); -} -static CYTHON_INLINE int -__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)++; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE int -__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)--; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE void -__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) -{ - int first_time; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) - return; - if (unlikely(__pyx_get_slice_count(memview) < 0)) - __pyx_fatalerror("Acquisition count is %d (line %d)", - __pyx_get_slice_count(memview), lineno); - first_time = __pyx_add_acquisition_count(memview) == 0; - if (unlikely(first_time)) { - if (have_gil) { - Py_INCREF((PyObject *) memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_INCREF((PyObject *) memview); - PyGILState_Release(_gilstate); - } - } -} -static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, - int have_gil, int lineno) { - int last_time; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) { - memslice->memview = NULL; - return; - } - if (unlikely(__pyx_get_slice_count(memview) <= 0)) - __pyx_fatalerror("Acquisition count is %d (line %d)", - __pyx_get_slice_count(memview), lineno); - last_time = __pyx_sub_acquisition_count(memview) == 1; - memslice->data = NULL; - if (unlikely(last_time)) { - if (have_gil) { - Py_CLEAR(memslice->memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_CLEAR(memslice->memview); - PyGILState_Release(_gilstate); - } - } else { - memslice->memview = NULL; - } -} - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" 
CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = (**name == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**name, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* None */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { - PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); -} - -/* ArgTypeTest */ -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) -{ - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - else if (exact) { - #if PY_MAJOR_VERSION == 2 - if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; - #endif - } - else { - if (likely(__Pyx_TypeCheck(obj, type))) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -/* PyObjectCall */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = func->ob_type->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* RaiseException */ -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, - CYTHON_UNUSED PyObject *cause) { - __Pyx_PyThreadState_declare - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; 
- } - } - if (PyType_Check(type)) { -#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { -#if CYTHON_COMPILING_IN_PYPY - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#else - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* PyCFunctionFastCall */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject * 
__Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { - PyCFunctionObject *func = (PyCFunctionObject*)func_obj; - PyCFunction meth = PyCFunction_GET_FUNCTION(func); - PyObject *self = PyCFunction_GET_SELF(func); - int flags = PyCFunction_GET_FLAGS(func); - assert(PyCFunction_Check(func)); - assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); - assert(nargs >= 0); - assert(nargs == 0 || args != NULL); - /* _PyCFunction_FastCallDict() must not be called with an exception set, - because it may clear it (directly or indirectly) and so the - caller loses its exception */ - assert(!PyErr_Occurred()); - if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { - return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); - } else { - return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); - } -} -#endif - -/* PyFunctionFastCall */ -#if CYTHON_FAST_PYCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif -#endif - -/* PyObjectCall2Args */ -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args, *result = NULL; - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyFunction_FastCall(function, args, 2); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyCFunction_FastCall(function, args, 2); - } - #endif - args = PyTuple_New(2); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - Py_INCREF(function); - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - Py_DECREF(function); -done: - return result; -} - -/* PyObjectCallMethO */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallOneArg */ -#if CYTHON_COMPILING_IN_CPYTHON -static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_New(1); - if (unlikely(!args)) return NULL; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); 
- return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, &arg, 1); - } -#endif - if (likely(PyCFunction_Check(func))) { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, arg); -#if CYTHON_FAST_PYCCALL - } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { - return __Pyx_PyCFunction_FastCall(func, &arg, 1); -#endif - } - } - return __Pyx__PyObject_CallOneArg(func, arg); -} -#else -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_Pack(1, arg); - if (unlikely(!args)) return NULL; - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -#endif - -/* BytesEquals */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - const char *ps1, *ps2; - Py_ssize_t length = PyBytes_GET_SIZE(s1); - if (length != PyBytes_GET_SIZE(s2)) - return (equals == Py_NE); - ps1 = PyBytes_AS_STRING(s1); - ps2 = PyBytes_AS_STRING(s2); - if (ps1[0] != ps2[0]) { - return (equals == Py_NE); - } else if (length == 1) { - return (equals == Py_EQ); - } else { - int result; -#if CYTHON_USE_UNICODE_INTERNALS - Py_hash_t hash1, hash2; - hash1 = ((PyBytesObject*)s1)->ob_shash; - hash2 = ((PyBytesObject*)s2)->ob_shash; - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - return (equals == Py_NE); - } -#endif - result = memcmp(ps1, ps2, (size_t)length); - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -/* UnicodeEquals */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else -#if PY_MAJOR_VERSION < 3 - PyObject* owned_ref = NULL; -#endif - int s1_is_unicode, s2_is_unicode; - if (s1 == s2) { - goto return_eq; - } - s1_is_unicode = PyUnicode_CheckExact(s1); - s2_is_unicode = PyUnicode_CheckExact(s2); -#if PY_MAJOR_VERSION < 3 - if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { - owned_ref = PyUnicode_FromObject(s2); - if (unlikely(!owned_ref)) - return -1; - s2 = owned_ref; - s2_is_unicode = 1; - } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { - owned_ref = PyUnicode_FromObject(s1); - if (unlikely(!owned_ref)) - return -1; - s1 = owned_ref; - s1_is_unicode = 1; - } else if (((!s2_is_unicode) & (!s1_is_unicode))) { - return __Pyx_PyBytes_Equals(s1, s2, equals); - } -#endif - if (s1_is_unicode & s2_is_unicode) { - Py_ssize_t length; - int kind; - void *data1, *data2; - if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) - return -1; - length = __Pyx_PyUnicode_GET_LENGTH(s1); - if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { - goto return_ne; - } -#if CYTHON_USE_UNICODE_INTERNALS - { - Py_hash_t hash1, hash2; - #if CYTHON_PEP393_ENABLED - hash1 = ((PyASCIIObject*)s1)->hash; - hash2 = ((PyASCIIObject*)s2)->hash; - #else - hash1 = ((PyUnicodeObject*)s1)->hash; - hash2 = ((PyUnicodeObject*)s2)->hash; - #endif - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - goto return_ne; - } - } -#endif - kind = __Pyx_PyUnicode_KIND(s1); - if (kind != __Pyx_PyUnicode_KIND(s2)) { - goto return_ne; - } - data1 = __Pyx_PyUnicode_DATA(s1); - data2 = __Pyx_PyUnicode_DATA(s2); - if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { - goto return_ne; - } else if (length == 1) { - goto return_eq; - } else { - int result = memcmp(data1, data2, (size_t)(length * kind)); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & s2_is_unicode) { - goto return_ne; - } else if ((s2 == Py_None) & s1_is_unicode) { - goto return_ne; - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -return_eq: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ); -return_ne: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_NE); -#endif -} - -/* None */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { - Py_ssize_t q = a / b; - Py_ssize_t r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* GetAttr */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { -#if CYTHON_USE_TYPE_SLOTS -#if PY_MAJOR_VERSION >= 3 - if (likely(PyUnicode_Check(n))) -#else - if (likely(PyString_Check(n))) -#endif - return __Pyx_PyObject_GetAttrStr(o, n); -#endif - return PyObject_GetAttr(o, n); -} - -/* GetItemInt */ -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return m->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* ObjectGetItem */ -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { - PyObject *runerr; - Py_ssize_t key_value; - PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; - if (unlikely(!(m && m->sq_item))) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); - return NULL; - } - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { - PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; - if (likely(m && m->mp_subscript)) { - return m->mp_subscript(obj, key); - } - return __Pyx_PyObject_GetIndex(obj, key); -} -#endif - -/* decode_c_string */ -static CYTHON_INLINE PyObject* __Pyx_decode_c_string( - const char* cstring, Py_ssize_t start, Py_ssize_t stop, - const char* encoding, const char* errors, - PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { - Py_ssize_t length; - if (unlikely((start < 0) | (stop < 0))) { - size_t slen = strlen(cstring); - if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { - PyErr_SetString(PyExc_OverflowError, - "c-string too long to convert to Python"); - return NULL; - } - length = (Py_ssize_t) slen; - if (start < 0) { - start += length; - if (start < 0) - start = 0; - } - if (stop < 0) - stop += length; - } - if (unlikely(stop <= start)) - return __Pyx_NewRef(__pyx_empty_unicode); - length = stop - start; - cstring += start; - if (decode_func) { - return decode_func(cstring, length, errors); - } else { - return PyUnicode_Decode(cstring, length, encoding, errors); - } -} - -/* PyErrExceptionMatches */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { - PyObject *exc_type = tstate->curexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; - if (unlikely(PyTuple_Check(err))) - return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - -/* GetAttr3 */ -static PyObject *__Pyx_GetAttr3Default(PyObject *d) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - __Pyx_PyErr_Clear(); - Py_INCREF(d); - return d; -} -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { - PyObject *r = __Pyx_GetAttr(o, n); - return (likely(r)) ?
r : __Pyx_GetAttr3Default(d); -} - -/* PyDictVersioning */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* RaiseTooManyValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/* RaiseNeedMoreValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -/* RaiseNoneIterError */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -/* ExtTypeTest */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(__Pyx_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -/* GetTopmostException */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - #endif - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -#endif - -/* GetException */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type, *local_value, *local_tb; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = 
exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - Py_XDECREF(local_tb); - return -1; -} - -/* SwapException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = *type; - exc_info->exc_value = *value; - exc_info->exc_traceback = *tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = *type; - tstate->exc_value = *value; - tstate->exc_traceback = *tb; - #endif - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); - PyErr_SetExcInfo(*type, *value, *tb); - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#endif - -/* Import */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (!py_import) - goto bad; - #endif - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, 1); - if (!module) { - if (!PyErr_ExceptionMatches(PyExc_ImportError)) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, level); - #endif - } - } -bad: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - Py_XDECREF(empty_list); - Py_XDECREF(empty_dict); - return module; -} - -/* FastTypeChecks */ -#if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = a->tp_base; - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = 
a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; - if (!res) { - res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } - return res; -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i= 0 || (x^b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - } - x = a + b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla + llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; - double a = PyFloat_AS_DOUBLE(op1); - double result; - PyFPE_START_PROTECT("add", return NULL) - 
result = ((double)a) + (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); -} -#endif - -/* None */ -static CYTHON_INLINE long __Pyx_div_long(long a, long b) { - long q = a / b; - long r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* ImportFrom */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - "cannot import name %S", name); - #endif - } - return value; -} - -/* HasAttr */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { - PyObject *r; - if (unlikely(!__Pyx_PyBaseString_Check(n))) { - PyErr_SetString(PyExc_TypeError, - "hasattr(): attribute name must be string"); - return -1; - } - r = __Pyx_GetAttr(o, n); - if (unlikely(!r)) { - PyErr_Clear(); - return 0; - } else { - Py_DECREF(r); - return 1; - } -} - -/* PyObject_GenericGetAttrNoDict */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'%.50s' object has no attribute '%U'", - tp->tp_name, attr_name); -#else - "'%.50s' object has no attribute '%.400s'", - tp->tp_name, PyString_AS_STRING(attr_name)); -#endif - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { - PyObject *descr; - PyTypeObject *tp = Py_TYPE(obj); - if (unlikely(!PyString_Check(attr_name))) { - return PyObject_GenericGetAttr(obj, attr_name); - } - assert(!tp->tp_dictoffset); - descr = _PyType_Lookup(tp, attr_name); - if (unlikely(!descr)) { - return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); - } - Py_INCREF(descr); - #if PY_MAJOR_VERSION < 3 - if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) - #endif - { - descrgetfunc f = Py_TYPE(descr)->tp_descr_get; - if (unlikely(f)) { - PyObject *res = f(descr, obj, (PyObject *)tp); - Py_DECREF(descr); - return res; - } - } - return descr; -} -#endif - -/* PyObject_GenericGetAttr */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { - if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { - return PyObject_GenericGetAttr(obj, attr_name); - } - return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); -} -#endif - -/* SetVTable */ -static int __Pyx_SetVtable(PyObject *dict, void *vtable) { -#if PY_VERSION_HEX >= 0x02070000 - PyObject *ob = PyCapsule_New(vtable, 0, 0); -#else - PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); -#endif - if (!ob) - goto bad; - if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) - goto bad; - Py_DECREF(ob); - return 0; -bad: - Py_XDECREF(ob); - return -1; -} - -/* PyObjectGetAttrStrNoError */ -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS 
&& PY_VERSION_HEX >= 0x030700B1 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - __Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - -/* SetupReduce */ -static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { - int ret; - PyObject *name_attr; - name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); - if (likely(name_attr)) { - ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); - } else { - ret = -1; - } - if (unlikely(ret < 0)) { - PyErr_Clear(); - ret = 0; - } - Py_XDECREF(name_attr); - return ret; -} -static int __Pyx_setup_reduce(PyObject* type_obj) { - int ret = 0; - PyObject *object_reduce = NULL; - PyObject *object_reduce_ex = NULL; - PyObject *reduce = NULL; - PyObject *reduce_ex = NULL; - PyObject *reduce_cython = NULL; - PyObject *setstate = NULL; - PyObject *setstate_cython = NULL; -#if CYTHON_USE_PYTYPE_LOOKUP - if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; -#else - if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; -#endif -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#else - object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#endif - reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; - if (reduce_ex == object_reduce_ex) { -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#else - object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#endif - reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; - if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { - reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); - if (likely(reduce_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (reduce == object_reduce || PyErr_Occurred()) { - goto __PYX_BAD; - } - setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); - if (!setstate) PyErr_Clear(); - if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { - setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); - if (likely(setstate_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (!setstate || PyErr_Occurred()) { - goto __PYX_BAD; - } - } - PyType_Modified((PyTypeObject*)type_obj); - } - } - goto __PYX_GOOD; -__PYX_BAD: - if (!PyErr_Occurred()) - PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); - ret = -1; -__PYX_GOOD: 
-#if !CYTHON_USE_PYTYPE_LOOKUP - Py_XDECREF(object_reduce); - Py_XDECREF(object_reduce_ex); -#endif - Py_XDECREF(reduce); - Py_XDECREF(reduce_ex); - Py_XDECREF(reduce_cython); - Py_XDECREF(setstate); - Py_XDECREF(setstate_cython); - return ret; -} - -/* CLineInTraceback */ -#ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - 
return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} - -/* AddTraceback */ -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(filename); - #else - py_srcfile = PyUnicode_FromString(filename); - #endif - if (!py_srcfile) goto bad; - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - Py_DECREF(py_funcname); - return py_code; -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); - PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; -} -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject *obj = view->obj; - if (!obj) return; - if (PyObject_CheckBuffer(obj)) { - PyBuffer_Release(view); - return; - } - if ((0)) {} - view->obj = NULL; - Py_DECREF(obj); -} -#endif - - -/* MemviewSliceIsContig */ -static int -__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) -{ - int i, index, step, start; - Py_ssize_t itemsize = mvs.memview->view.itemsize; - if (order == 'F') { - step = 1; - start = 0; - } else { - step = -1; - start = ndim - 1; - } - for (i = 0; i < ndim; i++) { - index = start + step * i; - if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) - return 0; - itemsize *= mvs.shape[index]; - } - return 1; -} - -/* OverlappingSlices */ -static void -__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, - void **out_start, void **out_end, - int ndim, size_t itemsize) -{ - char *start, *end; - int i; - start = end = slice->data; - for (i = 0; i < ndim; i++) { - Py_ssize_t stride = slice->strides[i]; - Py_ssize_t extent = slice->shape[i]; - if (extent == 0) { - *out_start = *out_end = start; - return; - } else { - if (stride > 0) - end += stride * (extent - 1); - else - start += stride * (extent - 1); - } - } - *out_start = start; - *out_end = end + itemsize; -} -static int -__pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize) -{ - void *start1, *end1, *start2, *end2; - __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); - __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); - return (start1 < end2) && (start2 < end1); -} - -/* Capsule */ -static CYTHON_INLINE PyObject * -__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) -{ - PyObject *cobj; -#if PY_VERSION_HEX >= 0x02070000 - cobj = PyCapsule_New(p, sig, NULL); -#else - cobj = PyCObject_FromVoidPtr(p, NULL); -#endif - return cobj; -} - -/* IsLittleEndian */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) -{ - union { - uint32_t u32; - uint8_t u8[4]; - } S; - S.u32 = 0x01020304; - return S.u8[0] == 4; -} - -/* BufferFormatCheck */ -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - ctx->is_valid_array = 0; - 
ctx->struct_alignment = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t <= '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} -static int __Pyx_BufFmt_ExpectNumber(const char **ts) { - int number = __Pyx_BufFmt_ParseNumber(ts); - if (number == -1) - PyErr_Format(PyExc_ValueError,\ - "Does not understand character buffer dtype format string ('%c')", **ts); - return number; -} -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case '?': return "'bool'"; - case 'c': return "'char'"; - case 'b': return "'signed char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? "'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 's': case 'p': return "a string"; - case 0: return "end"; - default: return "unparseable format string"; - } -} -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -/* These are for computing the padding at the end of the struct to align - on the first member of the struct. This will probably be the same as above, - but we don't have any guarantees. - */ -typedef struct { short x; char c; } __Pyx_pad_short; -typedef struct { int x; char c; } __Pyx_pad_int; -typedef struct { long x; char c; } __Pyx_pad_long; -typedef struct { float x; char c; } __Pyx_pad_float; -typedef struct { double x; char c; } __Pyx_pad_double; -typedef struct { long double x; char c; } __Pyx_pad_longdouble; -typedef struct { void *x; char c; } __Pyx_pad_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); - case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); - case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': - return 'H'; - case 'b': case 'h': case 'i': - case 'l': case 'q': case 's': case 'p': - return 'I'; - case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': - return 'U'; - case 'f': case 'd': case 'g': - return (is_complex ? 
'C' : 'R'); - case 'O': - return 'O'; - case 'P': - return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset, arraysize = 1; - if (ctx->enc_type == 0) return 0; - if (ctx->head->field->type->arraysize[0]) { - int i, ndim = 0; - if (ctx->enc_type == 's' || ctx->enc_type == 'p') { - ctx->is_valid_array = ctx->head->field->type->ndim == 1; - ndim = 1; - if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { - PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %zu", - ctx->head->field->type->arraysize[0], ctx->enc_count); - return -1; - } - } - if (!ctx->is_valid_array) { - PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", - ctx->head->field->type->ndim, ndim); - return -1; - } - for (i = 0; i < ctx->head->field->type->ndim; i++) { - arraysize *= ctx->head->field->type->arraysize[i]; - } - ctx->is_valid_array = 0; - ctx->enc_count = 1; - } - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - if (ctx->struct_alignment == 0) - ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, - ctx->is_complex); - } - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - if ((type->typegroup == 'H' || group == 'H') && type->size == size) { - } else { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - } - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - ctx->fmt_offset += size; - if (arraysize) - ctx->fmt_offset += (arraysize - 1) * size; - --ctx->enc_count; - while (1) { - if 
(field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} -static PyObject * -__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) -{ - const char *ts = *tsp; - int i = 0, number, ndim; - ++ts; - if (ctx->new_count != 1) { - PyErr_SetString(PyExc_ValueError, - "Cannot handle repeated arrays in format string"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ndim = ctx->head->field->type->ndim; - while (*ts && *ts != ')') { - switch (*ts) { - case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; - default: break; - } - number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) - return PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %d", - ctx->head->field->type->arraysize[i], number); - if (*ts != ',' && *ts != ')') - return PyErr_Format(PyExc_ValueError, - "Expected a comma in format string, got '%c'", *ts); - if (*ts == ',') ts++; - i++; - } - if (i != ndim) - return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", - ctx->head->field->type->ndim, i); - if (!*ts) { - PyErr_SetString(PyExc_ValueError, - "Unexpected end of format string, expected ')'"); - return NULL; - } - ctx->is_valid_array = 1; - ctx->new_count = 1; - *tsp = ++ts; - return Py_None; -} -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case '\r': - case '\n': - ++ts; - break; - case '<': - if (!__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - size_t struct_alignment = ctx->struct_alignment; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - ctx->enc_count = 0; - ctx->struct_alignment = 0; - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - if 
(struct_alignment) ctx->struct_alignment = struct_alignment; - } - break; - case '}': - { - size_t alignment = ctx->struct_alignment; - ++ts; - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - if (alignment && ctx->fmt_offset % alignment) { - ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); - } - } - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - CYTHON_FALLTHROUGH; - case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': case 'p': - if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && - (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { - ctx->enc_count += ctx->new_count; - ctx->new_count = 1; - got_Z = 0; - ++ts; - break; - } - CYTHON_FALLTHROUGH; - case 's': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - case '(': - if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; - break; - default: - { - int number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - ctx->new_count = (size_t)number; - } - } - } -} - -/* TypeInfoCompare */ - static int -__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) -{ - int i; - if (!a || !b) - return 0; - if (a == b) - return 1; - if (a->size != b->size || a->typegroup != b->typegroup || - a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { - if (a->typegroup == 'H' || b->typegroup == 'H') { - return a->size == b->size; - } else { - return 0; - } - } - if (a->ndim) { - for (i = 0; i < a->ndim; i++) - if (a->arraysize[i] != b->arraysize[i]) - return 0; - } - if (a->typegroup == 'S') { - if (a->flags != b->flags) - return 0; - if (a->fields || b->fields) { - if (!(a->fields && b->fields)) - return 0; - for (i = 0; a->fields[i].type && b->fields[i].type; i++) { - __Pyx_StructField *field_a = a->fields + i; - __Pyx_StructField *field_b = b->fields + i; - if (field_a->offset != field_b->offset || - !__pyx_typeinfo_cmp(field_a->type, field_b->type)) - return 0; - } - return !a->fields[i].type && !b->fields[i].type; - } - } - return 1; -} - -/* MemviewSliceValidateAndInit */ - static int -__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) -{ - if (buf->shape[dim] <= 1) - return 1; - if (buf->strides) { - if (spec & __Pyx_MEMVIEW_CONTIG) { - if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { - if (unlikely(buf->strides[dim] != sizeof(void *))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly contiguous " - "in dimension %d.", dim); - goto fail; - } - } else if (unlikely(buf->strides[dim] != buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_FOLLOW) { - Py_ssize_t stride = buf->strides[dim]; - if (stride < 0) - stride = -stride; - if (unlikely(stride < buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and 
memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - } else { - if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not contiguous in " - "dimension %d", dim); - goto fail; - } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not indirect in " - "dimension %d", dim); - goto fail; - } else if (unlikely(buf->suboffsets)) { - PyErr_SetString(PyExc_ValueError, - "Buffer exposes suboffsets but no strides"); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) -{ - if (spec & __Pyx_MEMVIEW_DIRECT) { - if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { - PyErr_Format(PyExc_ValueError, - "Buffer not compatible with direct access " - "in dimension %d.", dim); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_PTR) { - if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly accessible " - "in dimension %d.", dim); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) -{ - int i; - if (c_or_f_flag & __Pyx_IS_F_CONTIG) { - Py_ssize_t stride = 1; - for (i = 0; i < ndim; i++) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not fortran contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { - Py_ssize_t stride = 1; - for (i = ndim - 1; i >- 1; i--) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not C contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } - return 1; -fail: - return 0; -} -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj) -{ - struct __pyx_memoryview_obj *memview, *new_memview; - __Pyx_RefNannyDeclarations - Py_buffer *buf; - int i, spec = 0, retval = -1; - __Pyx_BufFmt_Context ctx; - int from_memoryview = __pyx_memoryview_check(original_obj); - __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); - if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) - original_obj)->typeinfo)) { - memview = (struct __pyx_memoryview_obj *) original_obj; - new_memview = NULL; - } else { - memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - original_obj, buf_flags, 0, dtype); - new_memview = memview; - if (unlikely(!memview)) - goto fail; - } - buf = &memview->view; - if (unlikely(buf->ndim != ndim)) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - ndim, buf->ndim); - goto fail; - } - if (new_memview) { - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; - } - if (unlikely((unsigned) buf->itemsize != dtype->size)) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " - "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", - buf->itemsize, - (buf->itemsize > 1) ? "s" : "", - dtype->name, - dtype->size, - (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->len > 0) { - for (i = 0; i < ndim; i++) { - spec = axes_specs[i]; - if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) - goto fail; - if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) - goto fail; - } - if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) - goto fail; - } - if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, - new_memview != NULL) == -1)) { - goto fail; - } - retval = 0; - goto no_fail; -fail: - Py_XDECREF(new_memview); - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, - &__Pyx_TypeInfo_int, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, - &__Pyx_TypeInfo_float, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, - &__Pyx_TypeInfo_int, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { - const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if 
(sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(int) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(int), - little, !is_unsigned); - } -} - -/* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { - const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* MemviewSliceCopyTemplate */ - static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object) -{ - __Pyx_RefNannyDeclarations - int i; - __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; - struct __pyx_memoryview_obj *from_memview = from_mvs->memview; - Py_buffer *buf = &from_memview->view; - PyObject *shape_tuple = NULL; - PyObject *temp_int = NULL; - struct __pyx_array_obj *array_obj = NULL; - struct __pyx_memoryview_obj *memview_obj = NULL; - __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); - for (i = 0; i < ndim; i++) { - if (unlikely(from_mvs->suboffsets[i] >= 0)) { - PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " - "indirect dimensions (axis %d)", i); - goto fail; - } - } - shape_tuple = PyTuple_New(ndim); - if (unlikely(!shape_tuple)) { - goto fail; - } - __Pyx_GOTREF(shape_tuple); - for(i = 0; i < ndim; i++) { - temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); - if(unlikely(!temp_int)) { - goto fail; - } else { - PyTuple_SET_ITEM(shape_tuple, i, temp_int); - temp_int = NULL; - } - } - 
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); - if (unlikely(!array_obj)) { - goto fail; - } - __Pyx_GOTREF(array_obj); - memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - (PyObject *) array_obj, contig_flag, - dtype_is_object, - from_mvs->memview->typeinfo); - if (unlikely(!memview_obj)) - goto fail; - if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) - goto fail; - if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, - dtype_is_object) < 0)) - goto fail; - goto no_fail; -fail: - __Pyx_XDECREF(new_mvs.memview); - new_mvs.memview = NULL; - new_mvs.data = NULL; -no_fail: - __Pyx_XDECREF(shape_tuple); - __Pyx_XDECREF(temp_int); - __Pyx_XDECREF(array_obj); - __Pyx_RefNannyFinishContext(); - return new_mvs; -} - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { - const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(int) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(int) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - 
} else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if (sizeof(int) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - 
int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { - const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(long) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(long) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= 
sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if (sizeof(long) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { 
-#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { - const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(char) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (char) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (char) 0; - case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) - case 2: - if (8 * sizeof(char) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { - return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(char) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { - return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(char) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { - return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (char) -1; - if (unlikely(result == 1)) - goto 
raise_neg_overflow; - } -#endif - if (sizeof(char) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (char) 0; - case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) - case -2: - if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(char) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(char) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(char) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { - return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - } -#endif - if (sizeof(char) <= sizeof(long)) { - 
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - char val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (char) -1; - } - } else { - char val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (char) -1; - val = __Pyx_PyInt_As_char(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to char"); - return (char) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to char"); - return (char) -1; -} - -/* CheckBinaryVersion */ - static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - return PyErr_WarnEx(NULL, message, 1); - } - return 0; -} - -/* InitStrings */ - static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - 
PyUnicode_AsASCIIString(o); - return NULL; - } - } - } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type %.200s). 
" - "The ability to return an instance of a strict subclass of int " - "is deprecated, and may be removed in a future version of Python.", - Py_TYPE(result)->tp_name)) { - Py_DECREF(result); - return NULL; - } - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type %.200s)", - type_name, type_name, Py_TYPE(result)->tp_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - ival = likely(size) ? digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -#endif /* Py_PYTHON_H */ diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/dev/packaging/gen_install_table.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/dev/packaging/gen_install_table.py deleted file mode 100644 index b4c852dc53de613707b9668f748184c2b63b9dea..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/dev/packaging/gen_install_table.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. -# -*- coding: utf-8 -*- - -import argparse - -template = """
<details><summary> install </summary><pre><code>\
-python -m pip install detectron2{d2_version} -f \\
-  https://dl.fbaipublicfiles.com/detectron2/wheels/{cuda}/torch{torch}/index.html
-</code></pre> </details>
""" -CUDA_SUFFIX = { - "11.3": "cu113", - "11.1": "cu111", - "11.0": "cu110", - "10.2": "cu102", - "10.1": "cu101", - "10.0": "cu100", - "9.2": "cu92", - "cpu": "cpu", -} - - -def gen_header(torch_versions): - return '' + "".join( - [ - ''.format(t) - for t in torch_versions - ] - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--d2-version", help="detectron2 version number, default to empty") - args = parser.parse_args() - d2_version = f"=={args.d2_version}" if args.d2_version else "" - - all_versions = ( - [("1.8", k) for k in ["11.1", "10.2", "10.1", "cpu"]] - + [("1.9", k) for k in ["11.1", "10.2", "cpu"]] - + [("1.10", k) for k in ["11.3", "11.1", "10.2", "cpu"]] - ) - - torch_versions = sorted( - {k[0] for k in all_versions}, key=lambda x: int(x.split(".")[1]), reverse=True - ) - cuda_versions = sorted( - {k[1] for k in all_versions}, key=lambda x: float(x) if x != "cpu" else 0, reverse=True - ) - - table = gen_header(torch_versions) - for cu in cuda_versions: - table += f""" """ - cu_suffix = CUDA_SUFFIX[cu] - for torch in torch_versions: - if (torch, cu) in all_versions: - cell = template.format(d2_version=d2_version, cuda=cu_suffix, torch=torch) - else: - cell = "" - table += f""" """ - table += "" - table += "
CUDA torch {}
{cu}{cell}
" - print(table) diff --git a/spaces/cbr/swp/face_parsing/swap.py b/spaces/cbr/swp/face_parsing/swap.py deleted file mode 100644 index 644a49029d21a68514ce23e5ab6c341b55f96351..0000000000000000000000000000000000000000 --- a/spaces/cbr/swp/face_parsing/swap.py +++ /dev/null @@ -1,134 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision.transforms as transforms -import cv2 -import numpy as np - -from .model import BiSeNet - -mask_regions = { - "Background":0, - "Skin":1, - "L-Eyebrow":2, - "R-Eyebrow":3, - "L-Eye":4, - "R-Eye":5, - "Eye-G":6, - "L-Ear":7, - "R-Ear":8, - "Ear-R":9, - "Nose":10, - "Mouth":11, - "U-Lip":12, - "L-Lip":13, - "Neck":14, - "Neck-L":15, - "Cloth":16, - "Hair":17, - "Hat":18 -} - -# Borrowed from simswap -# https://github.com/neuralchen/SimSwap/blob/26c84d2901bd56eda4d5e3c5ca6da16e65dc82a6/util/reverse2original.py#L30 -class SoftErosion(nn.Module): - def __init__(self, kernel_size=15, threshold=0.6, iterations=1): - super(SoftErosion, self).__init__() - r = kernel_size // 2 - self.padding = r - self.iterations = iterations - self.threshold = threshold - - # Create kernel - y_indices, x_indices = torch.meshgrid(torch.arange(0., kernel_size), torch.arange(0., kernel_size)) - dist = torch.sqrt((x_indices - r) ** 2 + (y_indices - r) ** 2) - kernel = dist.max() - dist - kernel /= kernel.sum() - kernel = kernel.view(1, 1, *kernel.shape) - self.register_buffer('weight', kernel) - - def forward(self, x): - x = x.float() - for i in range(self.iterations - 1): - x = torch.min(x, F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding)) - x = F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding) - - mask = x >= self.threshold - x[mask] = 1.0 - x[~mask] /= x[~mask].max() - - return x, mask - -device = "cpu" - -def init_parser(pth_path, mode="cpu"): - global device - device = mode - n_classes = 19 - net = BiSeNet(n_classes=n_classes) - if device == "cuda": - net.cuda() - net.load_state_dict(torch.load(pth_path)) - else: - net.load_state_dict(torch.load(pth_path, map_location=torch.device('cpu'))) - net.eval() - return net - - -def image_to_parsing(img, net): - img = cv2.resize(img, (512, 512)) - img = img[:,:,::-1] - transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) - ]) - img = transform(img.copy()) - img = torch.unsqueeze(img, 0) - - with torch.no_grad(): - img = img.to(device) - out = net(img)[0] - parsing = out.squeeze(0).cpu().numpy().argmax(0) - return parsing - - -def get_mask(parsing, classes): - res = parsing == classes[0] - for val in classes[1:]: - res += parsing == val - return res - -def swap_regions(source, target, net, smooth_mask, includes=[1,2,3,4,5,10,11,12,13], blur=10): - parsing = image_to_parsing(source, net) - - if len(includes) == 0: - return source, np.zeros_like(source) - - include_mask = get_mask(parsing, includes) - mask = np.repeat(include_mask[:, :, np.newaxis], 3, axis=2).astype("float32") - - if smooth_mask is not None: - mask_tensor = torch.from_numpy(mask.copy().transpose((2, 0, 1))).float().to(device) - face_mask_tensor = mask_tensor[0] + mask_tensor[1] - soft_face_mask_tensor, _ = smooth_mask(face_mask_tensor.unsqueeze_(0).unsqueeze_(0)) - soft_face_mask_tensor.squeeze_() - mask = np.repeat(soft_face_mask_tensor.cpu().numpy()[:, :, np.newaxis], 3, axis=2) - - if blur > 0: - mask = cv2.GaussianBlur(mask, (0, 0), blur) - - resized_source = cv2.resize((source/255).astype("float32"), (512, 
512)) - resized_target = cv2.resize((target/255).astype("float32"), (512, 512)) - - result = mask * resized_source + (1 - mask) * resized_target - normalized_result = (result - np.min(result)) / (np.max(result) - np.min(result)) - result = cv2.resize((result*255).astype("uint8"), (source.shape[1], source.shape[0])) - - return result - -def mask_regions_to_list(values): - out_ids = [] - for value in values: - if value in mask_regions.keys(): - out_ids.append(mask_regions.get(value)) - return out_ids diff --git a/spaces/cccc-c/web-ui-pub/_next/static/chunks/ff48af57.9bd46c4f54ef29df.js b/spaces/cccc-c/web-ui-pub/_next/static/chunks/ff48af57.9bd46c4f54ef29df.js deleted file mode 100644 index 8a73871624398bc94d1c8bfbed822e88ae459e11..0000000000000000000000000000000000000000 --- a/spaces/cccc-c/web-ui-pub/_next/static/chunks/ff48af57.9bd46c4f54ef29df.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[680],{48136:function(a,t,h){h.d(t,{etG:function(){return n}});var r=h(83270);function n(a){return(0,r.w_)({tag:"svg",attr:{fill:"currentColor",viewBox:"0 0 16 16"},child:[{tag:"path",attr:{d:"M4 1.5H3a2 2 0 0 0-2 2V14a2 2 0 0 0 2 2h10a2 2 0 0 0 2-2V3.5a2 2 0 0 0-2-2h-1v1h1a1 1 0 0 1 1 1V14a1 1 0 0 1-1 1H3a1 1 0 0 1-1-1V3.5a1 1 0 0 1 1-1h1v-1z"}},{tag:"path",attr:{d:"M9.5 1a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-.5.5h-3a.5.5 0 0 1-.5-.5v-1a.5.5 0 0 1 .5-.5h3zm-3-1A1.5 1.5 0 0 0 5 1.5v1A1.5 1.5 0 0 0 6.5 4h3A1.5 1.5 0 0 0 11 2.5v-1A1.5 1.5 0 0 0 9.5 0h-3z"}}]})(a)}}}]); \ No newline at end of file diff --git a/spaces/celise88/Pathfinder/templates/job_list.html b/spaces/celise88/Pathfinder/templates/job_list.html deleted file mode 100644 index 729f9e715977bc43759a0e35ac4c417f596ab82a..0000000000000000000000000000000000000000 --- a/spaces/celise88/Pathfinder/templates/job_list.html +++ /dev/null @@ -1,117 +0,0 @@ - - - - - - - Dashboard - - - - -
-

Job Information Center

-

Welcome to Pathfinder! Select a job from the dropdown menu below to begin your journey!

-
-
-

- - -

- -
-
-
-

{{ jobtitle }}

-

{{ jobdescription }}

-
- {% if jobtitle %} -

About the Job

- {% endif %} - {% if tasks %} -
-

Work Tasks:

-
    - {% for task in tasks %} -
  • {{ task }}
  • - {% endfor %} -
-
- {% endif %} - {% if activities %} -
-

Other Work Activities:

-

{{ activities }}

-
- {% endif %} - {% if conditions %} -
-

Work Conditions:

-

{{ conditions }}

-
- {% endif %} - {% if compensation %} -
-

Compensation:

-

{{ compensation }}

-
- {% endif %} - {% if jobtitle %} -

About the Qualified Candidate

-

We're sorry! This section is currently under construction. Please check back soon to see the characteristics necessary to be great {{ jobtitle }}.

- {% endif %} - {% if preparation %} -
-

Has Degree(s) &/or License(s)/Certification(s) in:

-

{{ preparation }}

-
- {% endif %} - {% if knowledge %} -
-

Knows About:

-

{{ knowledge }}

-
- {% endif %} - {% if skills %} -
-

Is Skilled at:

-

{{ skills }}

-
- {% endif %} - {% if abilities %} -
-

Is Able to:

-

{{ abilities }}

-
- {% endif %} - {% if interests %} -
-

Is Interested in:

-

{{ interests }}

-
- {% endif %} -
- - - \ No newline at end of file diff --git a/spaces/chatarena/chatarena-demo/app.py b/spaces/chatarena/chatarena-demo/app.py deleted file mode 100644 index c6191b89b5674b960ae612b5bc82b8370ac602a4..0000000000000000000000000000000000000000 --- a/spaces/chatarena/chatarena-demo/app.py +++ /dev/null @@ -1,398 +0,0 @@ -import re -import json -import gradio as gr -from glob import glob - -from chatarena.arena import Arena, TooManyInvalidActions -from chatarena.backends import BACKEND_REGISTRY -from chatarena.backends.human import HumanBackendError -from chatarena.config import ArenaConfig -from chatarena.environments import ENV_REGISTRY -from chatarena.database import log_arena, log_messages, SupabaseDB, supabase_available -from chatarena.message import Message - -css = """#col-container {max-width: 90%; margin-left: auto; margin-right: auto; display: flex; flex-direction: column;} -#header {text-align: center;} -#col-chatbox {flex: 1; max-height: min(750px, 100%);} -#label {font-size: 2em; padding: 0.5em; margin: 0;} -.message {font-size: 1.2em;} -.message-wrap {max-height: min(700px, 100vh);} -""" -# .wrap {min-width: min(640px, 100vh)} -# #env-desc {max-height: 100px; overflow-y: auto;} -# .textarea {height: 100px; max-height: 100px;} -# #chatbot-tab-all {height: 750px; max-height: min(750px, 100%);} -# #chatbox {height: min(750px, 100%); max-height: min(750px, 100%);} -# #chatbox.block {height: 730px} -# .wrap {max-height: 680px;} -# .scroll-hide {overflow-y: scroll; max-height: 100px;} - - -DEBUG = False - -DEFAULT_BACKEND = "openai-chat" -DEFAULT_ENV = "conversation" -MAX_NUM_PLAYERS = 6 -DEFAULT_NUM_PLAYERS = 2 - - -def load_examples(): - example_configs = {} - # Load json config files from examples folder - example_files = glob("examples/*.json") - for example_file in example_files: - with open(example_file, 'r') as f: - example = json.load(f) - example_configs[example["name"]] = example - return example_configs - - -EXAMPLE_REGISTRY = load_examples() - -DB = SupabaseDB() if supabase_available else None - - -def get_moderator_components(visible=True): - name = "Moderator" - with gr.Row(): - with gr.Column(): - role_desc = gr.Textbox(label="Moderator role", lines=1, visible=visible, interactive=True, - placeholder=f"Enter the role description for {name}") - terminal_condition = gr.Textbox(show_label=False, lines=1, visible=visible, interactive=True, - placeholder="Enter the end criteria for the conversation") - with gr.Column(): - backend_type = gr.Dropdown(show_label=False, visible=visible, interactive=True, - choices=list(BACKEND_REGISTRY.keys()), value=DEFAULT_BACKEND) - with gr.Accordion(f"{name} Parameters", open=False, visible=visible) as accordion: - temperature = gr.Slider(minimum=0, maximum=2.0, step=0.1, interactive=True, visible=visible, - label=f"temperature", value=0.7) - max_tokens = gr.Slider(minimum=10, maximum=500, step=10, interactive=True, visible=visible, - label=f"max tokens", value=200) - - return [role_desc, terminal_condition, backend_type, accordion, temperature, max_tokens] - - -def get_player_components(name, visible): - with gr.Row(): - with gr.Column(): - role_desc = gr.Textbox(label=name, lines=3, interactive=True, visible=visible, - placeholder=f"Enter the role description for {name}") - with gr.Column(): - backend_type = gr.Dropdown(show_label=False, choices=list(BACKEND_REGISTRY.keys()), - interactive=True, visible=visible, value=DEFAULT_BACKEND) - with gr.Accordion(f"{name} Parameters", open=False, visible=visible) as accordion: - temperature = 
gr.Slider(minimum=0, maximum=2.0, step=0.1, interactive=True, visible=visible, - label=f"temperature", value=0.7) - max_tokens = gr.Slider(minimum=10, maximum=500, step=10, interactive=True, visible=visible, - label=f"max tokens", value=200) - - return [role_desc, backend_type, accordion, temperature, max_tokens] - - -def get_empty_state(): - return gr.State({"arena": None}) - - -with gr.Blocks(css=css) as demo: - state = get_empty_state() - all_components = [] - - with gr.Column(elem_id="col-container"): - gr.Markdown("""# 🏟 ChatArena️
-Prompting multiple AI agents to play games in a language-driven environment. -**[Project Homepage](https://www.chatarena.org)**""", elem_id="header") - - with gr.Row(): - env_selector = gr.Dropdown(choices=list(ENV_REGISTRY.keys()), value=DEFAULT_ENV, interactive=True, - label="Environment Type", show_label=True) - example_selector = gr.Dropdown(choices=list(EXAMPLE_REGISTRY.keys()), interactive=True, - label="Select Example", show_label=True) - - # Environment configuration - env_desc_textbox = gr.Textbox(show_label=True, lines=2, visible=True, label="Environment Description", - placeholder="Enter a description of a scenario or the game rules.") - - all_components += [env_selector, example_selector, env_desc_textbox] - - with gr.Row(): - with gr.Column(elem_id="col-chatbox"): - with gr.Tab("All", visible=True): - chatbot = gr.Chatbot( - elem_id="chatbox", visible=True, show_label=False) - - player_chatbots = [] - for i in range(MAX_NUM_PLAYERS): - player_name = f"Player {i + 1}" - with gr.Tab(player_name, visible=(i < DEFAULT_NUM_PLAYERS)): - player_chatbot = gr.Chatbot(elem_id=f"chatbox-{i}", visible=i < DEFAULT_NUM_PLAYERS, - label=player_name, show_label=False) - player_chatbots.append(player_chatbot) - - all_components += [chatbot, *player_chatbots] - - with gr.Column(elem_id="col-config"): # Player Configuration - # gr.Markdown("Player Configuration") - parallel_checkbox = gr.Checkbox( - label="Parallel Actions", value=False, visible=True) - with gr.Accordion("Moderator", open=False, visible=True): - moderator_components = get_moderator_components(True) - all_components += [parallel_checkbox, *moderator_components] - - all_players_components, players_idx2comp = [], {} - with gr.Blocks(): - num_player_slider = gr.Slider(2, MAX_NUM_PLAYERS, value=DEFAULT_NUM_PLAYERS, step=1, - label="Number of players:") - for i in range(MAX_NUM_PLAYERS): - player_name = f"Player {i + 1}" - with gr.Tab(player_name, visible=(i < DEFAULT_NUM_PLAYERS)) as tab: - player_comps = get_player_components( - player_name, visible=(i < DEFAULT_NUM_PLAYERS)) - - players_idx2comp[i] = player_comps + [tab] - all_players_components += player_comps + [tab] - - all_components += [num_player_slider] + all_players_components - - def variable_players(k): - k = int(k) - update_dict = {} - for i in range(MAX_NUM_PLAYERS): - if i < k: - for comp in players_idx2comp[i]: - update_dict[comp] = gr.update(visible=True) - update_dict[player_chatbots[i] - ] = gr.update(visible=True) - else: - for comp in players_idx2comp[i]: - update_dict[comp] = gr.update(visible=False) - update_dict[player_chatbots[i] - ] = gr.update(visible=False) - return update_dict - - num_player_slider.change( - variable_players, num_player_slider, all_players_components + player_chatbots) - - human_input_textbox = gr.Textbox(show_label=True, label="Human Input", lines=1, visible=True, - interactive=True, placeholder="Enter your input here") - with gr.Row(): - btn_step = gr.Button("Start") - btn_restart = gr.Button("Clear") - - all_components += [human_input_textbox, btn_step, btn_restart] - - def _convert_to_chatbot_output(all_messages, display_recv=False): - chatbot_output = [] - for i, message in enumerate(all_messages): - agent_name, msg, recv = message.agent_name, message.content, str( - message.visible_to) - # Preprocess message for chatbot output - new_msg = re.sub(r'\n+', '
', msg.strip()) - if display_recv: - # Add role to the message - new_msg = f"**{agent_name} (-> {recv})**: {new_msg}" - else: - new_msg = f"**{agent_name}**: {new_msg}" - - if agent_name == "Moderator": - chatbot_output.append((new_msg, None)) - else: - chatbot_output.append((None, new_msg)) - return chatbot_output - - def _create_arena_config_from_components(all_comps: dict) -> ArenaConfig: - env_desc = all_comps[env_desc_textbox] - - # Initialize the players - num_players = all_comps[num_player_slider] - player_configs = [] - for i in range(num_players): - player_name = f"Player {i + 1}" - role_desc, backend_type, temperature, max_tokens = [ - all_comps[c] for c in players_idx2comp[i] if not isinstance(c, (gr.Accordion, gr.Tab))] - player_config = { - "name": player_name, - "role_desc": role_desc, - "global_prompt": env_desc, - "backend": { - "backend_type": backend_type, - "temperature": temperature, - "max_tokens": max_tokens - } - } - player_configs.append(player_config) - - # Initialize the environment - env_type = all_comps[env_selector] - # Get moderator config - mod_role_desc, mod_terminal_condition, moderator_backend_type, mod_temp, mod_max_tokens = [ - all_comps[c] for c in moderator_components if not isinstance(c, (gr.Accordion, gr.Tab))] - moderator_config = { - "role_desc": mod_role_desc, - "global_prompt": env_desc, - "terminal_condition": mod_terminal_condition, - "backend": { - "backend_type": moderator_backend_type, - "temperature": mod_temp, - "max_tokens": mod_max_tokens - } - } - env_config = { - "env_type": env_type, - "parallel": all_comps[parallel_checkbox], - "moderator": moderator_config, - "moderator_visibility": "all", - "moderator_period": "turn" - } - - # arena_config = {"players": player_configs, "environment": env_config} - arena_config = ArenaConfig( - players=player_configs, environment=env_config) - return arena_config - - def step_game(all_comps: dict): - yield {btn_step: gr.update(value="Running...", interactive=False), - btn_restart: gr.update(interactive=False)} - - cur_state = all_comps[state] - - # If arena is not yet created, create it - if cur_state["arena"] is None: - # Create the Arena - arena_config = _create_arena_config_from_components(all_comps) - arena = Arena.from_config(arena_config) - log_arena(arena, database=DB) - cur_state["arena"] = arena - else: - arena = cur_state["arena"] - - try: - timestep = arena.step() - except HumanBackendError as e: - # Handle human input and recover with the game update - human_input = all_comps[human_input_textbox] - if human_input == "": - timestep = None # Failed to get human input - else: - timestep = arena.environment.step(e.agent_name, human_input) - except TooManyInvalidActions as e: - timestep = arena.current_timestep - timestep.observation.append( - Message("System", "Too many invalid actions. 
Game over.", turn=-1, visible_to="all")) - timestep.terminal = True - - if timestep is None: - yield {human_input_textbox: gr.update(value="", placeholder="Please enter a valid input"), - btn_step: gr.update(value="Next Step", interactive=True), - btn_restart: gr.update(interactive=True)} - else: - all_messages = timestep.observation # user sees what the moderator sees - log_messages(arena, all_messages, database=DB) - - chatbot_output = _convert_to_chatbot_output( - all_messages, display_recv=True) - update_dict = {human_input_textbox: gr.Textbox.update(value=""), - chatbot: chatbot_output, - btn_step: gr.update(value="Next Step", interactive=not timestep.terminal), - btn_restart: gr.update(interactive=True), state: cur_state} - # Get the visible messages for each player - for i, player in enumerate(arena.players): - player_messages = arena.environment.get_observation( - player.name) - player_output = _convert_to_chatbot_output(player_messages) - # Update the player's chatbot output - update_dict[player_chatbots[i]] = player_output - - if DEBUG: - arena.environment.print() - - yield update_dict - - def restart_game(all_comps: dict): - cur_state = all_comps[state] - cur_state["arena"] = None - yield {chatbot: [], btn_restart: gr.update(interactive=False), - btn_step: gr.update(interactive=False), state: cur_state} - - arena_config = _create_arena_config_from_components(all_comps) - arena = Arena.from_config(arena_config) - log_arena(arena, database=DB) - cur_state["arena"] = arena - - yield {btn_step: gr.update(value="Start", interactive=True), - btn_restart: gr.update(interactive=True), state: cur_state} - - # Remove Accordion and Tab from the list of components - all_components = [comp for comp in all_components if not isinstance( - comp, (gr.Accordion, gr.Tab))] - - # If any of the Textbox, Slider, Checkbox, Dropdown, RadioButtons is changed, the Step button is disabled - for comp in all_components: - def _disable_step_button(state): - if state["arena"] is not None: - return gr.update(interactive=False) - else: - return gr.update() - - if isinstance(comp, - (gr.Textbox, gr.Slider, gr.Checkbox, gr.Dropdown, gr.Radio)) and comp is not human_input_textbox: - comp.change(_disable_step_button, state, btn_step) - - btn_step.click(step_game, set(all_components + [state]), - [chatbot, *player_chatbots, btn_step, btn_restart, state, human_input_textbox]) - btn_restart.click(restart_game, set(all_components + [state]), - [chatbot, *player_chatbots, btn_step, btn_restart, state, human_input_textbox]) - - # If an example is selected, update the components - - def update_components_from_example(all_comps: dict): - example_name = all_comps[example_selector] - example_config = EXAMPLE_REGISTRY[example_name] - update_dict = {} - - # Update the environment components - env_config = example_config['environment'] - update_dict[env_desc_textbox] = gr.update( - value=example_config['global_prompt']) - update_dict[env_selector] = gr.update(value=env_config['env_type']) - update_dict[parallel_checkbox] = gr.update( - value=env_config['parallel']) - - # Update the moderator components - if "moderator" in env_config: - mod_role_desc, mod_terminal_condition, moderator_backend_type, mod_temp, mod_max_tokens = [ - c for c in moderator_components if not isinstance(c, (gr.Accordion, gr.Tab)) - ] - update_dict[mod_role_desc] = gr.update( - value=env_config['moderator']['role_desc']) - update_dict[mod_terminal_condition] = gr.update( - value=env_config['moderator']['terminal_condition']) - 
update_dict[moderator_backend_type] = gr.update( - value=env_config['moderator']['backend']['backend_type']) - update_dict[mod_temp] = gr.update( - value=env_config['moderator']['backend']['temperature']) - update_dict[mod_max_tokens] = gr.update( - value=env_config['moderator']['backend']['max_tokens']) - - # Update the player components - update_dict[num_player_slider] = gr.update( - value=len(example_config['players'])) - for i, player_config in enumerate(example_config['players']): - role_desc, backend_type, temperature, max_tokens = [ - c for c in players_idx2comp[i] if not isinstance(c, (gr.Accordion, gr.Tab)) - ] - update_dict[role_desc] = gr.update( - value=player_config['role_desc']) - update_dict[backend_type] = gr.update( - value=player_config['backend']['backend_type']) - update_dict[temperature] = gr.update( - value=player_config['backend']['temperature']) - update_dict[max_tokens] = gr.update( - value=player_config['backend']['max_tokens']) - - return update_dict - - example_selector.change(update_components_from_example, set( - all_components + [state]), all_components + [state]) - -demo.queue() -demo.launch(debug=DEBUG) diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/distillation.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/distillation.py deleted file mode 100644 index 323f62bf45812e6a501c327438dd0b05bedae80b..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/distillation.py +++ /dev/null @@ -1,310 +0,0 @@ -#!/usr/bin/env python - -import argparse -import gc -import os -import sys -from pathlib import Path -from typing import List # noqa: F401 - -import pytorch_lightning as pl -import torch -from finetune import SummarizationModule, TranslationModule -from finetune import main as ft_main -from make_student import create_student_by_copying_alternating_layers, get_layers_to_supervise -from torch import nn - -from transformers import AutoModelForSeq2SeqLM, MBartTokenizer, T5ForConditionalGeneration -from transformers.models.bart.modeling_bart import shift_tokens_right -from utils import calculate_bleu, check_output_dir, freeze_params, label_smoothed_nll_loss, use_task_specific_params - - -# need the parent dir module -sys.path.insert(2, str(Path(__file__).resolve().parents[1])) -from lightning_base import generic_train # noqa - - -class SummarizationDistiller(SummarizationModule): - """Supports T5, Bart, Pegasus and other models that inherit from Bart.""" - - loss_names = ["loss", "ce_loss", "mlm_loss", "hid_loss_enc", "hid_loss_dec"] - - def __init__(self, hparams): - assert Path(hparams.data_dir).exists() - self.output_dir = Path(hparams.output_dir) - self.output_dir.mkdir(exist_ok=True) - - save_dir = self.output_dir.joinpath("student") - - hparams.model_name_or_path = str(save_dir) # Tell lightning we are training the student - teacher = AutoModelForSeq2SeqLM.from_pretrained(hparams.teacher).eval() - use_task_specific_params(teacher, hparams.task) # We copy good generation parameters to student by default - if hparams.student is not None: - student = AutoModelForSeq2SeqLM.from_pretrained(hparams.student) - use_task_specific_params(student, hparams.task) - e_layer_ids, d_layer_ids = None, None - else: - student, e_layer_ids, d_layer_ids = create_student_by_copying_alternating_layers( - teacher, e=hparams.student_encoder_layers, d=hparams.student_decoder_layers, 
save_path=save_dir - ) - - if hparams.length_penalty != -1: - student.config.length_penalty = hparams.length_penalty - hparams.tokenizer_name = hparams.teacher # Use teacher's tokenizer - super().__init__(hparams, model=student, config=student.config) - assert student.config.model_type == teacher.config.model_type, ( - f"teacher, student model types should be the same, got {student.config.model_type} !=" - f" {teacher.config.model_type}" - ) - - if student.config.model_type == "t5": - student_encoder_layers = len(student.get_encoder().block) - student_decoder_layers = len(student.get_decoder().block) - teacher_encoder_layers = len(teacher.get_encoder().block) - teacher_decoder_layers = len(teacher.get_decoder().block) - else: - student_encoder_layers = student.config.encoder_layers - student_decoder_layers = student.config.decoder_layers - teacher_encoder_layers = teacher.config.encoder_layers - teacher_decoder_layers = teacher.config.decoder_layers - - self.different_base_models = not (hparams.student is None or hparams.teacher == hparams.student) - self.do_calc_hidden_loss = (not self.different_base_models) and hparams.alpha_hid > 0 - self.different_encoder = self.different_base_models or (student_encoder_layers != teacher_encoder_layers) - # self.different_encoder determines whether we need to run the teacher encoder - self.teacher = teacher - freeze_params(self.teacher) - - if not self.different_encoder: # To save RAM, delete teacher encoder and freeze student encoder. - try: - del self.teacher.model.encoder - except AttributeError: # T5 - del self.teacher.encoder - - if e_layer_ids is None: - e_layer_ids = list(range(student_encoder_layers)) - if d_layer_ids is None: - d_layer_ids = list(range(student_decoder_layers)) - - self.e_layer_ids, self.d_layer_ids = e_layer_ids, d_layer_ids # type: List[int], List[int] - - if self.do_calc_hidden_loss: # Intermediate supervision: Decide which layers to supervise - if hparams.supervise_forward: - self.e_matches = get_layers_to_supervise( - n_student=len(self.e_layer_ids), n_teacher=teacher_encoder_layers - ) - self.d_matches = get_layers_to_supervise( - n_student=len(self.d_layer_ids), n_teacher=teacher_decoder_layers - ) - else: # student layer should emulate hidden states of the teacher layer it was copied from - self.e_matches = self.e_layer_ids - self.d_matches = self.d_layer_ids - else: - self.e_matches = None - self.d_matches = None - - self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") - self.temperature = 2.0 - self.alpha_mlm = hparams.alpha_mlm - self.alpha_ce = hparams.alpha_ce - self.alpha_hid = hparams.alpha_hid - gc.collect() - torch.cuda.empty_cache() - - def calc_ce_loss(self, mask, s_logits, t_logits): - """Copy pasted from distillbert (transformers/examples/distillation/)""" - # mask has False at padding_idx - sel_mask = mask[:, :, None].expand_as(s_logits) - vocab_size = s_logits.size(-1) - s_logits_slct = torch.masked_select(s_logits, sel_mask) # (bs * seq_length * voc_size) modulo the 1s in mask - t_logits_slct = torch.masked_select(t_logits, sel_mask) # (bs * seq_length * voc_size) modulo the 1s in mask - s_logits_slct = s_logits_slct.view(-1, vocab_size) # (bs * seq_length, voc_size) modulo the 1s in mask - t_logits_slct = t_logits_slct.view(-1, vocab_size) # (bs * seq_length, voc_size) modulo the 1s in mask - assert t_logits_slct.size() == s_logits_slct.size() - loss_ce = ( - self.ce_loss_fct( - nn.functional.log_softmax(s_logits_slct / self.temperature, dim=-1), - nn.functional.softmax(t_logits_slct / 
self.temperature, dim=-1), - ) - * (self.temperature) ** 2 - ) - return loss_ce - - @staticmethod - def add_model_specific_args(parser, root_dir): - SummarizationModule.add_model_specific_args(parser, root_dir) - add_distill_args(parser) - return parser - - def _step(self, batch: dict) -> tuple: - """Compute the loss for a batch""" - pad_token_id = self.tokenizer.pad_token_id - input_ids, src_mask, labels = batch["input_ids"], batch["attention_mask"], batch["labels"] - if isinstance(self.model, T5ForConditionalGeneration): - decoder_input_ids = self.model._shift_right(labels) - else: - decoder_input_ids = shift_tokens_right(labels, pad_token_id) - - # noinspection PyCallingNonCallable - student_outputs = self( - input_ids, - attention_mask=src_mask, - decoder_input_ids=decoder_input_ids, - output_hidden_states=self.do_calc_hidden_loss, - output_attentions=False, - use_cache=False, - ) - lm_logits = student_outputs["logits"] - - # Same cross entropy vs. label smoothing logic as finetune.py - assert lm_logits.shape[-1] == self.model.config.vocab_size - if self.hparams.label_smoothing == 0: - # Same behavior as modeling_bart.py, besides ignoring pad_token_id - loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id) - student_lm_loss = loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), labels.view(-1)) - else: - lprobs = nn.functional.log_softmax(lm_logits, dim=-1) - student_lm_loss, _ = label_smoothed_nll_loss( - lprobs, labels, self.hparams.label_smoothing, ignore_index=pad_token_id - ) - - def zero_tensor(): - return torch.tensor(0.0).type_as(student_lm_loss) - - teacher_enc_outputs = student_outputs[ - "encoder_last_hidden_state" - ] # use this unless self.different_base_models - hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor() - if self.different_encoder: # compute encoder hidden state loss - all_teacher_encoder_outputs = self.teacher.get_encoder()( - input_ids, - attention_mask=src_mask, - output_hidden_states=self.do_calc_hidden_loss, - ) - if self.different_base_models: - teacher_enc_outputs = all_teacher_encoder_outputs["last_hidden_state"] - elif self.do_calc_hidden_loss: - hid_loss_enc = self.calc_hidden_loss( - src_mask, - student_outputs["encoder_hidden_states"], - all_teacher_encoder_outputs["hidden_states"], - self.e_matches, - normalize_hidden=self.hparams.normalize_hidden, - ) - - teacher_outputs = self.teacher( - input_ids, - attention_mask=src_mask, - encoder_outputs=(teacher_enc_outputs,), - decoder_input_ids=decoder_input_ids, - output_hidden_states=self.do_calc_hidden_loss, - use_cache=False, # since we are not passing labels, never let this default to True - ) - dec_mask = decoder_input_ids.ne(pad_token_id) - loss_ce = self.calc_ce_loss(dec_mask, lm_logits, teacher_outputs["logits"]) - if self.do_calc_hidden_loss: # Intermediate supervision of decoder hidden states - hid_loss_dec = self.calc_hidden_loss( - dec_mask, - student_outputs["decoder_hidden_states"], - teacher_outputs["decoder_hidden_states"], - self.d_matches, - normalize_hidden=self.hparams.normalize_hidden, - ) - - blended_loss = ( - self.alpha_ce * loss_ce - + self.alpha_mlm * student_lm_loss - + self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec) - ) - return blended_loss, loss_ce, student_lm_loss, hid_loss_enc, hid_loss_dec - - @staticmethod - def calc_hidden_loss(attention_mask, hidden_states, hidden_states_T, matches, normalize_hidden): - """MSE(student_hid, teacher_hid[matches]). Called "Intermediate supervision" in paper. 
Inspired by TinyBERT.""" - msg = "expected list or tuple for hidden_states, got tensor of shape: " - assert not isinstance(hidden_states, torch.Tensor), f"{msg}{hidden_states.shape}" - assert not isinstance(hidden_states_T, torch.Tensor), f"{msg}{hidden_states_T.shape}" - mask = attention_mask.to(hidden_states[0]) - valid_count = mask.sum() * hidden_states[0].size(-1) - student_states = torch.stack([hidden_states[i] for i in range(len(matches))]) - teacher_states = torch.stack([hidden_states_T[j] for j in matches]) - assert student_states.shape == teacher_states.shape, f"{student_states.shape} != {teacher_states.shape}" - if normalize_hidden: - student_states = nn.functional.layer_norm(student_states, student_states.shape[1:]) - teacher_states = nn.functional.layer_norm(teacher_states, teacher_states.shape[1:]) - mse = nn.functional.mse_loss(student_states, teacher_states, reduction="none") - masked_mse = (mse * mask.unsqueeze(0).unsqueeze(-1)).sum() / valid_count - return masked_mse - - -def add_distill_args(parser): - # NOTE: if --student argument was specified and the teacher and student base models - # are different, the models still have to have the same tokenizer, specified by - # --tokenizer_name. So, for example, you can distill from t5_large to t5_small but not - # from bart to t5. This s because if the tokenizers are different, the output space - # for the two models is also different and their logits are not comparable. - parser.add_argument("--teacher", type=str) - parser.add_argument("--alpha_ce", default=0.8, type=float) - parser.add_argument("--alpha_mlm", default=0.2, type=float) - parser.add_argument("--alpha_hid", default=0.0, type=float, required=False) - parser.add_argument("--student", type=str, required=False) - parser.add_argument("--student_decoder_layers", default=12, type=int, required=False) - parser.add_argument("--student_encoder_layers", default=12, type=int, required=False) - parser.add_argument("--no_teacher", action="store_true", default=False) - parser.add_argument("--length_penalty", type=float, default=-1) - parser.add_argument("--supervise_forward", action="store_true", default=False) - parser.add_argument("--normalize_hidden", action="store_true", default=False) - - -class TranslationDistiller(SummarizationDistiller): - """Supports T5, mBART, Marian, other models that inherit from Bart.""" - - mode = "translation" - metric_names = ["bleu"] - default_val_metric = "bleu" - - def __init__(self, hparams, **kwargs): - super().__init__(hparams, **kwargs) - assert hparams.src_lang is not None - assert hparams.tgt_lang is not None - self.dataset_kwargs["src_lang"] = hparams.src_lang - self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang - if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer): - self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang] - - def calc_generative_metrics(self, preds, target) -> dict: - return calculate_bleu(preds, target) - - @staticmethod - def add_model_specific_args(parser, root_dir): - TranslationModule.add_model_specific_args(parser, root_dir) - add_distill_args(parser) - return parser - - -def create_module(args): - if args.no_teacher: - module_cls = TranslationModule if "translation" in args.task else SummarizationModule - else: # DISTILL WITH TEACHER - module_cls = TranslationDistiller if "translation" in args.task else SummarizationDistiller - args.setup_cls: str = module_cls.__name__ - print(f"using module {args.setup_cls}") - model = module_cls(args) - 
return model - - -def distill_main(args): - Path(args.output_dir).mkdir(exist_ok=True) - check_output_dir(args, expected_items=3) - - model = create_module(args) - return ft_main(args, model=model) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser = pl.Trainer.add_argparse_args(parser) - parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd()) - args = parser.parse_args() - - distill_main(args) diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/models/albert/__init__.py b/spaces/chendl/compositional_test/transformers/src/transformers/models/albert/__init__.py deleted file mode 100644 index 168c68db837d08817e08e493efa81e7419ab9de9..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/models/albert/__init__.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import TYPE_CHECKING - -from ...utils import ( - OptionalDependencyNotAvailable, - _LazyModule, - is_flax_available, - is_sentencepiece_available, - is_tf_available, - is_tokenizers_available, - is_torch_available, -) - - -_import_structure = { - "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"], -} - -try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_albert"] = ["AlbertTokenizer"] - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_albert"] = [ - "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", - "AlbertForMaskedLM", - "AlbertForMultipleChoice", - "AlbertForPreTraining", - "AlbertForQuestionAnswering", - "AlbertForSequenceClassification", - "AlbertForTokenClassification", - "AlbertModel", - "AlbertPreTrainedModel", - "load_tf_weights_in_albert", - ] - -try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_tf_albert"] = [ - "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", - "TFAlbertForMaskedLM", - "TFAlbertForMultipleChoice", - "TFAlbertForPreTraining", - "TFAlbertForQuestionAnswering", - "TFAlbertForSequenceClassification", - "TFAlbertForTokenClassification", - "TFAlbertMainLayer", - "TFAlbertModel", - "TFAlbertPreTrainedModel", - ] - -try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_flax_albert"] = [ - "FlaxAlbertForMaskedLM", - "FlaxAlbertForMultipleChoice", - "FlaxAlbertForPreTraining", - 
"FlaxAlbertForQuestionAnswering", - "FlaxAlbertForSequenceClassification", - "FlaxAlbertForTokenClassification", - "FlaxAlbertModel", - "FlaxAlbertPreTrainedModel", - ] - -if TYPE_CHECKING: - from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig - - try: - if not is_sentencepiece_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_albert import AlbertTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_albert_fast import AlbertTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_albert import ( - ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, - AlbertForMaskedLM, - AlbertForMultipleChoice, - AlbertForPreTraining, - AlbertForQuestionAnswering, - AlbertForSequenceClassification, - AlbertForTokenClassification, - AlbertModel, - AlbertPreTrainedModel, - load_tf_weights_in_albert, - ) - - try: - if not is_tf_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_tf_albert import ( - TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, - TFAlbertForMaskedLM, - TFAlbertForMultipleChoice, - TFAlbertForPreTraining, - TFAlbertForQuestionAnswering, - TFAlbertForSequenceClassification, - TFAlbertForTokenClassification, - TFAlbertMainLayer, - TFAlbertModel, - TFAlbertPreTrainedModel, - ) - - try: - if not is_flax_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_flax_albert import ( - FlaxAlbertForMaskedLM, - FlaxAlbertForMultipleChoice, - FlaxAlbertForPreTraining, - FlaxAlbertForQuestionAnswering, - FlaxAlbertForSequenceClassification, - FlaxAlbertForTokenClassification, - FlaxAlbertModel, - FlaxAlbertPreTrainedModel, - ) -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/backoff/types.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/backoff/types.py deleted file mode 100644 index 25f20a4c43f79a62278b00081c5d7da5dfc12e3e..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/backoff/types.py +++ /dev/null @@ -1,6 +0,0 @@ -# coding:utf-8 -from ._typing import Details - -__all__ = [ - 'Details' -] diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/ttProgram.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/ttProgram.py deleted file mode 100644 index 84aa63f36301ec9a4ae21acff0cbc95010d956b7..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/ttProgram.py +++ /dev/null @@ -1,593 +0,0 @@ -"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs.""" -from __future__ import annotations - -from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin -import array -from io import StringIO -from typing import List -import re -import logging - - -log = logging.getLogger(__name__) - -# fmt: off - -# first, the list of instructions 
that eat bytes or words from the instruction stream - -streamInstructions = [ -# -# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes -# - (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn - (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn - (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn - (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn -] - - -# next, the list of "normal" instructions - -instructions = [ -# -# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes -# - (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p - - (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n| - (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2) - (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 - - (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... , ploopvalue - - (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b - (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f - - (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n) - (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek - (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack - - (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n - - (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - - (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - - (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n - (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2 - (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e - (0x59, 'EIF', 0, 'EndIf', 0, 0), # - - - (0x1b, 'ELSE', 0, 'Else', 0, 0), # - - - (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - - - (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b - (0x57, 'EVEN', 0, 'Even', 1, 1), # e b - (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f - - (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - - - (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - - - (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue - - (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l - - (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l - - (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n) - (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c - (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result - (0x91, 'GETVARIATION', 0, 'GetVariation', 0, -1), # - a1,..,an - (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py - (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py - (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b - (0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b - (0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f - - (0x58, 'IF', 0, 'If', 1, 0), # e - - (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v - - (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... 
, ploopvalue - - (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p - - (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - - - (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset - - (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset - - (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset - - (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count - - (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b - (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b - (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2) - (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d - (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p - - (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p - - (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p - - (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2) - (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek - (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p - - (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem - (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize - (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p - - (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64 - (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n - (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b - (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e ) - (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2 - (0x56, 'ODD', 0, 'Odd', 1, 1), # e b - (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b - (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e - - (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value - (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - - - (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - - - (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c - (0x68, 'ROUND', 2, 'Round', 1, 1), # n1 n2 - (0x43, 'RS', 0, 'ReadStore', 1, 1), # n v - (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - - - (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - - - (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - - - (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - - - (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n - - (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight - - (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n - - (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n - - (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p - - (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n - - (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n - - (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 - - (0x5f, 'SDS', 0, 'SetDeltaShiftInGState', 1, 0), # n - - (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x - - (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - - - (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 - - (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - - - (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c - - (0x32, 'SHP', 1, 'ShiftPointByLastPoint', -1, 0), # p1, p2, ..., ploopvalue - - (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue - - (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e - - (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n - - (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance - - (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x - - (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - - - (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 - - (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n - - (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p - - (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p - - (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p - - (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n - - 
(0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n - - (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2) - (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - - - (0x23, 'SWAP', 0, 'SwapTopStack', 2, 2), # e2, e1 e1, e2 - (0x13, 'SZP0', 0, 'SetZonePointer0', 1, 0), # n - - (0x14, 'SZP1', 0, 'SetZonePointer1', 1, 0), # n - - (0x15, 'SZP2', 0, 'SetZonePointer2', 1, 0), # n - - (0x16, 'SZPS', 0, 'SetZonePointerS', 1, 0), # n - - (0x29, 'UTP', 0, 'UnTouchPt', 1, 0), # p - - (0x70, 'WCVTF', 0, 'WriteCVTInFUnits', 2, 0), # n, l - - (0x44, 'WCVTP', 0, 'WriteCVTInPixels', 2, 0), # v, l - - (0x42, 'WS', 0, 'WriteStore', 2, 0), # v, l - -] - -# fmt: on - - -def bitRepr(value, bits): - s = "" - for i in range(bits): - s = "01"[value & 0x1] + s - value = value >> 1 - return s - - -_mnemonicPat = re.compile(r"[A-Z][A-Z0-9]*$") - - -def _makeDict(instructionList): - opcodeDict = {} - mnemonicDict = {} - for op, mnemonic, argBits, name, pops, pushes in instructionList: - assert _mnemonicPat.match(mnemonic) - mnemonicDict[mnemonic] = op, argBits, name - if argBits: - argoffset = op - for i in range(1 << argBits): - opcodeDict[op + i] = mnemonic, argBits, argoffset, name - else: - opcodeDict[op] = mnemonic, 0, 0, name - return opcodeDict, mnemonicDict - - -streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions) -opcodeDict, mnemonicDict = _makeDict(instructions) - - -class tt_instructions_error(Exception): - def __init__(self, error): - self.error = error - - def __str__(self): - return "TT instructions error: %s" % repr(self.error) - - -_comment = r"/\*.*?\*/" -_instruction = r"([A-Z][A-Z0-9]*)\s*\[(.*?)\]" -_number = r"-?[0-9]+" -_token = "(%s)|(%s)|(%s)" % (_instruction, _number, _comment) - -_tokenRE = re.compile(_token) -_whiteRE = re.compile(r"\s*") - -_pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]+).*?\*/") - -_indentRE = re.compile(r"^FDEF|IF|ELSE\[ \]\t.+") -_unindentRE = re.compile(r"^ELSE|ENDF|EIF\[ \]\t.+") - - -def _skipWhite(data, pos): - m = _whiteRE.match(data, pos) - newPos = m.regs[0][1] - assert newPos >= pos - return newPos - - -class Program(object): - def __init__(self) -> None: - pass - - def fromBytecode(self, bytecode: bytes) -> None: - self.bytecode = array.array("B", bytecode) - if hasattr(self, "assembly"): - del self.assembly - - def fromAssembly(self, assembly: List[str] | str) -> None: - if isinstance(assembly, list): - self.assembly = assembly - elif isinstance(assembly, str): - self.assembly = assembly.splitlines() - else: - raise TypeError(f"expected str or List[str], got {type(assembly).__name__}") - if hasattr(self, "bytecode"): - del self.bytecode - - def getBytecode(self) -> bytes: - if not hasattr(self, "bytecode"): - self._assemble() - return self.bytecode.tobytes() - - def getAssembly(self, preserve=True) -> List[str]: - if not hasattr(self, "assembly"): - self._disassemble(preserve=preserve) - return self.assembly - - def toXML(self, writer, ttFont) -> None: - if ( - not hasattr(ttFont, "disassembleInstructions") - or ttFont.disassembleInstructions - ): - try: - assembly = self.getAssembly() - except: - import traceback - - tmp = StringIO() - traceback.print_exc(file=tmp) - msg = "An exception occurred during the decompilation of glyph program:\n\n" - msg += tmp.getvalue() - log.error(msg) - writer.begintag("bytecode") - writer.newline() - writer.comment(msg.strip()) - writer.newline() - writer.dumphex(self.getBytecode()) - writer.endtag("bytecode") - writer.newline() - else: - if not assembly: - return - 
writer.begintag("assembly") - writer.newline() - i = 0 - indent = 0 - nInstr = len(assembly) - while i < nInstr: - instr = assembly[i] - if _unindentRE.match(instr): - indent -= 1 - writer.write(writer.indentwhite * indent) - writer.write(instr) - writer.newline() - m = _pushCountPat.match(instr) - i = i + 1 - if m: - nValues = int(m.group(1)) - line: List[str] = [] - j = 0 - for j in range(nValues): - if j and not (j % 25): - writer.write(writer.indentwhite * indent) - writer.write(" ".join(line)) - writer.newline() - line = [] - line.append(assembly[i + j]) - writer.write(writer.indentwhite * indent) - writer.write(" ".join(line)) - writer.newline() - i = i + j + 1 - if _indentRE.match(instr): - indent += 1 - writer.endtag("assembly") - writer.newline() - else: - bytecode = self.getBytecode() - if not bytecode: - return - writer.begintag("bytecode") - writer.newline() - writer.dumphex(bytecode) - writer.endtag("bytecode") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont) -> None: - if name == "assembly": - self.fromAssembly(strjoin(content)) - self._assemble() - del self.assembly - else: - assert name == "bytecode" - self.fromBytecode(readHex(content)) - - def _assemble(self) -> None: - assembly = " ".join(getattr(self, "assembly", [])) - bytecode: List[int] = [] - push = bytecode.append - lenAssembly = len(assembly) - pos = _skipWhite(assembly, 0) - while pos < lenAssembly: - m = _tokenRE.match(assembly, pos) - if m is None: - raise tt_instructions_error( - "Syntax error in TT program (%s)" % assembly[pos - 5 : pos + 15] - ) - dummy, mnemonic, arg, number, comment = m.groups() - pos = m.regs[0][1] - if comment: - pos = _skipWhite(assembly, pos) - continue - - arg = arg.strip() - if mnemonic.startswith("INSTR"): - # Unknown instruction - op = int(mnemonic[5:]) - push(op) - elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"): - op, argBits, name = mnemonicDict[mnemonic] - if len(arg) != argBits: - raise tt_instructions_error( - "Incorrect number of argument bits (%s[%s])" % (mnemonic, arg) - ) - if arg: - arg = binary2num(arg) - push(op + arg) - else: - push(op) - else: - args = [] - pos = _skipWhite(assembly, pos) - while pos < lenAssembly: - m = _tokenRE.match(assembly, pos) - if m is None: - raise tt_instructions_error( - "Syntax error in TT program (%s)" % assembly[pos : pos + 15] - ) - dummy, _mnemonic, arg, number, comment = m.groups() - if number is None and comment is None: - break - pos = m.regs[0][1] - pos = _skipWhite(assembly, pos) - if comment is not None: - continue - args.append(int(number)) - nArgs = len(args) - if mnemonic == "PUSH": - # Automatically choose the most compact representation - nWords = 0 - while nArgs: - while ( - nWords < nArgs - and nWords < 255 - and not (0 <= args[nWords] <= 255) - ): - nWords += 1 - nBytes = 0 - while ( - nWords + nBytes < nArgs - and nBytes < 255 - and 0 <= args[nWords + nBytes] <= 255 - ): - nBytes += 1 - if ( - nBytes < 2 - and nWords + nBytes < 255 - and nWords + nBytes != nArgs - ): - # Will write bytes as words - nWords += nBytes - continue - - # Write words - if nWords: - if nWords <= 8: - op, argBits, name = streamMnemonicDict["PUSHW"] - op = op + nWords - 1 - push(op) - else: - op, argBits, name = streamMnemonicDict["NPUSHW"] - push(op) - push(nWords) - for value in args[:nWords]: - assert -32768 <= value < 32768, ( - "PUSH value out of range %d" % value - ) - push((value >> 8) & 0xFF) - push(value & 0xFF) - - # Write bytes - if nBytes: - pass - if nBytes <= 8: - op, argBits, name = 
streamMnemonicDict["PUSHB"] - op = op + nBytes - 1 - push(op) - else: - op, argBits, name = streamMnemonicDict["NPUSHB"] - push(op) - push(nBytes) - for value in args[nWords : nWords + nBytes]: - push(value) - - nTotal = nWords + nBytes - args = args[nTotal:] - nArgs -= nTotal - nWords = 0 - else: - # Write exactly what we've been asked to - words = mnemonic[-1] == "W" - op, argBits, name = streamMnemonicDict[mnemonic] - if mnemonic[0] != "N": - assert nArgs <= 8, nArgs - op = op + nArgs - 1 - push(op) - else: - assert nArgs < 256 - push(op) - push(nArgs) - if words: - for value in args: - assert -32768 <= value < 32768, ( - "PUSHW value out of range %d" % value - ) - push((value >> 8) & 0xFF) - push(value & 0xFF) - else: - for value in args: - assert 0 <= value < 256, ( - "PUSHB value out of range %d" % value - ) - push(value) - - pos = _skipWhite(assembly, pos) - - if bytecode: - assert max(bytecode) < 256 and min(bytecode) >= 0 - self.bytecode = array.array("B", bytecode) - - def _disassemble(self, preserve=False) -> None: - assembly = [] - i = 0 - bytecode = getattr(self, "bytecode", []) - numBytecode = len(bytecode) - while i < numBytecode: - op = bytecode[i] - try: - mnemonic, argBits, argoffset, name = opcodeDict[op] - except KeyError: - if op in streamOpcodeDict: - values = [] - - # Merge consecutive PUSH operations - while bytecode[i] in streamOpcodeDict: - op = bytecode[i] - mnemonic, argBits, argoffset, name = streamOpcodeDict[op] - words = mnemonic[-1] == "W" - if argBits: - nValues = op - argoffset + 1 - else: - i = i + 1 - nValues = bytecode[i] - i = i + 1 - assert nValues > 0 - if not words: - for j in range(nValues): - value = bytecode[i] - values.append(repr(value)) - i = i + 1 - else: - for j in range(nValues): - # cast to signed int16 - value = (bytecode[i] << 8) | bytecode[i + 1] - if value >= 0x8000: - value = value - 0x10000 - values.append(repr(value)) - i = i + 2 - if preserve: - break - - if not preserve: - mnemonic = "PUSH" - nValues = len(values) - if nValues == 1: - assembly.append("%s[ ] /* 1 value pushed */" % mnemonic) - else: - assembly.append( - "%s[ ] /* %s values pushed */" % (mnemonic, nValues) - ) - assembly.extend(values) - else: - assembly.append("INSTR%d[ ]" % op) - i = i + 1 - else: - if argBits: - assembly.append( - mnemonic - + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name) - ) - else: - assembly.append(mnemonic + "[ ] /* %s */" % name) - i = i + 1 - self.assembly = assembly - - def __bool__(self) -> bool: - """ - >>> p = Program() - >>> bool(p) - False - >>> bc = array.array("B", [0]) - >>> p.fromBytecode(bc) - >>> bool(p) - True - >>> p.bytecode.pop() - 0 - >>> bool(p) - False - - >>> p = Program() - >>> asm = ['SVTCA[0]'] - >>> p.fromAssembly(asm) - >>> bool(p) - True - >>> p.assembly.pop() - 'SVTCA[0]' - >>> bool(p) - False - """ - return (hasattr(self, "assembly") and len(self.assembly) > 0) or ( - hasattr(self, "bytecode") and len(self.bytecode) > 0 - ) - - __nonzero__ = __bool__ - - def __eq__(self, other) -> bool: - if type(self) != type(other): - return NotImplemented - return self.__dict__ == other.__dict__ - - def __ne__(self, other) -> bool: - result = self.__eq__(other) - return result if result is NotImplemented else not result - - -def _test(): - """ - >>> _test() - True - """ - - bc = b"""@;:9876543210/.-,+*)(\'&%$#"! 
\037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! \212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 
9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033Data Becker Rechnungsdruckerei 2013 Pro Crack.rarl

diff --git a/spaces/cihyFjudo/fairness-paper-search/Stickamvids Xxlovetoskate22x.avi.md b/spaces/cihyFjudo/fairness-paper-search/Stickamvids Xxlovetoskate22x.avi.md deleted file mode 100644 index 2ee7b52f62178d6a3acdb23c326b4b846abead8e..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Stickamvids Xxlovetoskate22x.avi.md +++ /dev/null @@ -1,6 +0,0 @@ -

Stickamvids xxlovetoskate22x.avi


Download Ziphttps://tinurli.com/2uwjAQ



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageMorph.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageMorph.py deleted file mode 100644 index 6fccc315b3d25cf2cfe2dec952c938041f1d4531..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/PIL/ImageMorph.py +++ /dev/null @@ -1,254 +0,0 @@ -# A binary morphology add-on for the Python Imaging Library -# -# History: -# 2014-06-04 Initial version. -# -# Copyright (c) 2014 Dov Grobgeld - -import re - -from . import Image, _imagingmorph - -LUT_SIZE = 1 << 9 - -# fmt: off -ROTATION_MATRIX = [ - 6, 3, 0, - 7, 4, 1, - 8, 5, 2, -] -MIRROR_MATRIX = [ - 2, 1, 0, - 5, 4, 3, - 8, 7, 6, -] -# fmt: on - - -class LutBuilder: - """A class for building a MorphLut from a descriptive language - - The input patterns is a list of a strings sequences like these:: - - 4:(... - .1. - 111)->1 - - (whitespaces including linebreaks are ignored). The option 4 - describes a series of symmetry operations (in this case a - 4-rotation), the pattern is described by: - - - . or X - Ignore - - 1 - Pixel is on - - 0 - Pixel is off - - The result of the operation is described after "->" string. - - The default is to return the current pixel value, which is - returned if no other match is found. - - Operations: - - - 4 - 4 way rotation - - N - Negate - - 1 - Dummy op for no other operation (an op must always be given) - - M - Mirroring - - Example:: - - lb = LutBuilder(patterns = ["4:(... .1. 111)->1"]) - lut = lb.build_lut() - - """ - - def __init__(self, patterns=None, op_name=None): - if patterns is not None: - self.patterns = patterns - else: - self.patterns = [] - self.lut = None - if op_name is not None: - known_patterns = { - "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"], - "dilation4": ["4:(... .0. .1.)->1"], - "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"], - "erosion4": ["4:(... .1. .0.)->0"], - "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"], - "edge": [ - "1:(... ... ...)->0", - "4:(.0. .1. ...)->1", - "4:(01. .1. ...)->1", - ], - } - if op_name not in known_patterns: - msg = "Unknown pattern " + op_name + "!" - raise Exception(msg) - - self.patterns = known_patterns[op_name] - - def add_patterns(self, patterns): - self.patterns += patterns - - def build_default_lut(self): - symbols = [0, 1] - m = 1 << 4 # pos of current pixel - self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE)) - - def get_lut(self): - return self.lut - - def _string_permute(self, pattern, permutation): - """string_permute takes a pattern and a permutation and returns the - string permuted according to the permutation list. - """ - assert len(permutation) == 9 - return "".join(pattern[p] for p in permutation) - - def _pattern_permute(self, basic_pattern, options, basic_result): - """pattern_permute takes a basic pattern and its result and clones - the pattern according to the modifications described in the $options - parameter. 
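For example, the "4" option clones a pattern into its 90-, 180- and 270-degree rotations in addition to the original pattern.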
It returns a list of all cloned patterns.""" - patterns = [(basic_pattern, basic_result)] - - # rotations - if "4" in options: - res = patterns[-1][1] - for i in range(4): - patterns.append( - (self._string_permute(patterns[-1][0], ROTATION_MATRIX), res) - ) - # mirror - if "M" in options: - n = len(patterns) - for pattern, res in patterns[:n]: - patterns.append((self._string_permute(pattern, MIRROR_MATRIX), res)) - - # negate - if "N" in options: - n = len(patterns) - for pattern, res in patterns[:n]: - # Swap 0 and 1 - pattern = pattern.replace("0", "Z").replace("1", "0").replace("Z", "1") - res = 1 - int(res) - patterns.append((pattern, res)) - - return patterns - - def build_lut(self): - """Compile all patterns into a morphology lut. - - TBD :Build based on (file) morphlut:modify_lut - """ - self.build_default_lut() - patterns = [] - - # Parse and create symmetries of the patterns strings - for p in self.patterns: - m = re.search(r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", p.replace("\n", "")) - if not m: - msg = 'Syntax error in pattern "' + p + '"' - raise Exception(msg) - options = m.group(1) - pattern = m.group(2) - result = int(m.group(3)) - - # Get rid of spaces - pattern = pattern.replace(" ", "").replace("\n", "") - - patterns += self._pattern_permute(pattern, options, result) - - # compile the patterns into regular expressions for speed - for i, pattern in enumerate(patterns): - p = pattern[0].replace(".", "X").replace("X", "[01]") - p = re.compile(p) - patterns[i] = (p, pattern[1]) - - # Step through table and find patterns that match. - # Note that all the patterns are searched. The last one - # caught overrides - for i in range(LUT_SIZE): - # Build the bit pattern - bitpattern = bin(i)[2:] - bitpattern = ("0" * (9 - len(bitpattern)) + bitpattern)[::-1] - - for p, r in patterns: - if p.match(bitpattern): - self.lut[i] = [0, 1][r] - - return self.lut - - -class MorphOp: - """A class for binary morphological operators""" - - def __init__(self, lut=None, op_name=None, patterns=None): - """Create a binary morphological operator""" - self.lut = lut - if op_name is not None: - self.lut = LutBuilder(op_name=op_name).build_lut() - elif patterns is not None: - self.lut = LutBuilder(patterns=patterns).build_lut() - - def apply(self, image): - """Run a single morphological operation on an image - - Returns a tuple of the number of changed pixels and the - morphed image""" - if self.lut is None: - msg = "No operator loaded" - raise Exception(msg) - - if image.mode != "L": - msg = "Image mode must be L" - raise ValueError(msg) - outimage = Image.new(image.mode, image.size, None) - count = _imagingmorph.apply(bytes(self.lut), image.im.id, outimage.im.id) - return count, outimage - - def match(self, image): - """Get a list of coordinates matching the morphological operation on - an image. - - Returns a list of tuples of (x,y) coordinates - of all matching pixels. See :ref:`coordinate-system`.""" - if self.lut is None: - msg = "No operator loaded" - raise Exception(msg) - - if image.mode != "L": - msg = "Image mode must be L" - raise ValueError(msg) - return _imagingmorph.match(bytes(self.lut), image.im.id) - - def get_on_pixels(self, image): - """Get a list of all turned on pixels in a binary image - - Returns a list of tuples of (x,y) coordinates - of all matching pixels. 
See :ref:`coordinate-system`.""" - - if image.mode != "L": - msg = "Image mode must be L" - raise ValueError(msg) - return _imagingmorph.get_on_pixels(image.im.id) - - def load_lut(self, filename): - """Load an operator from an mrl file""" - with open(filename, "rb") as f: - self.lut = bytearray(f.read()) - - if len(self.lut) != LUT_SIZE: - self.lut = None - msg = "Wrong size operator file!" - raise Exception(msg) - - def save_lut(self, filename): - """Save an operator to an mrl file""" - if self.lut is None: - msg = "No operator loaded" - raise Exception(msg) - with open(filename, "wb") as f: - f.write(self.lut) - - def set_lut(self, lut): - """Set the lut from an external source""" - self.lut = lut diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiohttp/pytest_plugin.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiohttp/pytest_plugin.py deleted file mode 100644 index dd9a9f617901ef2c2fa7c1b4ceb5dd92ecbfd5de..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/aiohttp/pytest_plugin.py +++ /dev/null @@ -1,391 +0,0 @@ -import asyncio -import contextlib -import warnings -from collections.abc import Callable -from typing import Any, Awaitable, Callable, Dict, Generator, Optional, Union - -import pytest - -from aiohttp.helpers import PY_37, isasyncgenfunction -from aiohttp.web import Application - -from .test_utils import ( - BaseTestServer, - RawTestServer, - TestClient, - TestServer, - loop_context, - setup_test_loop, - teardown_test_loop, - unused_port as _unused_port, -) - -try: - import uvloop -except ImportError: # pragma: no cover - uvloop = None - -try: - import tokio -except ImportError: # pragma: no cover - tokio = None - -AiohttpClient = Callable[[Union[Application, BaseTestServer]], Awaitable[TestClient]] - - -def pytest_addoption(parser): # type: ignore[no-untyped-def] - parser.addoption( - "--aiohttp-fast", - action="store_true", - default=False, - help="run tests faster by disabling extra checks", - ) - parser.addoption( - "--aiohttp-loop", - action="store", - default="pyloop", - help="run tests with specific loop: pyloop, uvloop, tokio or all", - ) - parser.addoption( - "--aiohttp-enable-loop-debug", - action="store_true", - default=False, - help="enable event loop debug mode", - ) - - -def pytest_fixture_setup(fixturedef): # type: ignore[no-untyped-def] - """Set up pytest fixture. - - Allow fixtures to be coroutines. Run coroutine fixtures in an event loop. - """ - func = fixturedef.func - - if isasyncgenfunction(func): - # async generator fixture - is_async_gen = True - elif asyncio.iscoroutinefunction(func): - # regular async fixture - is_async_gen = False - else: - # not an async fixture, nothing to do - return - - strip_request = False - if "request" not in fixturedef.argnames: - fixturedef.argnames += ("request",) - strip_request = True - - def wrapper(*args, **kwargs): # type: ignore[no-untyped-def] - request = kwargs["request"] - if strip_request: - del kwargs["request"] - - # if neither the fixture nor the test use the 'loop' fixture, - # 'getfixturevalue' will fail because the test is not parameterized - # (this can be removed someday if 'loop' is no longer parameterized) - if "loop" not in request.fixturenames: - raise Exception( - "Asynchronous fixtures must depend on the 'loop' fixture or " - "be used in tests depending from it." 
- ) - - _loop = request.getfixturevalue("loop") - - if is_async_gen: - # for async generators, we need to advance the generator once, - # then advance it again in a finalizer - gen = func(*args, **kwargs) - - def finalizer(): # type: ignore[no-untyped-def] - try: - return _loop.run_until_complete(gen.__anext__()) - except StopAsyncIteration: - pass - - request.addfinalizer(finalizer) - return _loop.run_until_complete(gen.__anext__()) - else: - return _loop.run_until_complete(func(*args, **kwargs)) - - fixturedef.func = wrapper - - -@pytest.fixture -def fast(request): # type: ignore[no-untyped-def] - """--fast config option""" - return request.config.getoption("--aiohttp-fast") - - -@pytest.fixture -def loop_debug(request): # type: ignore[no-untyped-def] - """--enable-loop-debug config option""" - return request.config.getoption("--aiohttp-enable-loop-debug") - - -@contextlib.contextmanager -def _runtime_warning_context(): # type: ignore[no-untyped-def] - """Context manager which checks for RuntimeWarnings. - - This exists specifically to - avoid "coroutine 'X' was never awaited" warnings being missed. - - If RuntimeWarnings occur in the context a RuntimeError is raised. - """ - with warnings.catch_warnings(record=True) as _warnings: - yield - rw = [ - "{w.filename}:{w.lineno}:{w.message}".format(w=w) - for w in _warnings - if w.category == RuntimeWarning - ] - if rw: - raise RuntimeError( - "{} Runtime Warning{},\n{}".format( - len(rw), "" if len(rw) == 1 else "s", "\n".join(rw) - ) - ) - - -@contextlib.contextmanager -def _passthrough_loop_context(loop, fast=False): # type: ignore[no-untyped-def] - """Passthrough loop context. - - Sets up and tears down a loop unless one is passed in via the loop - argument when it's passed straight through. - """ - if loop: - # loop already exists, pass it straight through - yield loop - else: - # this shadows loop_context's standard behavior - loop = setup_test_loop() - yield loop - teardown_test_loop(loop, fast=fast) - - -def pytest_pycollect_makeitem(collector, name, obj): # type: ignore[no-untyped-def] - """Fix pytest collecting for coroutines.""" - if collector.funcnamefilter(name) and asyncio.iscoroutinefunction(obj): - return list(collector._genfunctions(name, obj)) - - -def pytest_pyfunc_call(pyfuncitem): # type: ignore[no-untyped-def] - """Run coroutines in an event loop instead of a normal function call.""" - fast = pyfuncitem.config.getoption("--aiohttp-fast") - if asyncio.iscoroutinefunction(pyfuncitem.function): - existing_loop = pyfuncitem.funcargs.get( - "proactor_loop" - ) or pyfuncitem.funcargs.get("loop", None) - with _runtime_warning_context(): - with _passthrough_loop_context(existing_loop, fast=fast) as _loop: - testargs = { - arg: pyfuncitem.funcargs[arg] - for arg in pyfuncitem._fixtureinfo.argnames - } - _loop.run_until_complete(pyfuncitem.obj(**testargs)) - - return True - - -def pytest_generate_tests(metafunc): # type: ignore[no-untyped-def] - if "loop_factory" not in metafunc.fixturenames: - return - - loops = metafunc.config.option.aiohttp_loop - avail_factories = {"pyloop": asyncio.DefaultEventLoopPolicy} - - if uvloop is not None: # pragma: no cover - avail_factories["uvloop"] = uvloop.EventLoopPolicy - - if tokio is not None: # pragma: no cover - avail_factories["tokio"] = tokio.EventLoopPolicy - - if loops == "all": - loops = "pyloop,uvloop?,tokio?" 
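- # A trailing "?" marks a loop backend as optional: it is skipped below when its
- # module is not installed, instead of raising an error.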
- - factories = {} # type: ignore[var-annotated] - for name in loops.split(","): - required = not name.endswith("?") - name = name.strip(" ?") - if name not in avail_factories: # pragma: no cover - if required: - raise ValueError( - "Unknown loop '%s', available loops: %s" - % (name, list(factories.keys())) - ) - else: - continue - factories[name] = avail_factories[name] - metafunc.parametrize( - "loop_factory", list(factories.values()), ids=list(factories.keys()) - ) - - -@pytest.fixture -def loop(loop_factory, fast, loop_debug): # type: ignore[no-untyped-def] - """Return an instance of the event loop.""" - policy = loop_factory() - asyncio.set_event_loop_policy(policy) - with loop_context(fast=fast) as _loop: - if loop_debug: - _loop.set_debug(True) # pragma: no cover - asyncio.set_event_loop(_loop) - yield _loop - - -@pytest.fixture -def proactor_loop(): # type: ignore[no-untyped-def] - if not PY_37: - policy = asyncio.get_event_loop_policy() - policy._loop_factory = asyncio.ProactorEventLoop # type: ignore[attr-defined] - else: - policy = asyncio.WindowsProactorEventLoopPolicy() # type: ignore[attr-defined] - asyncio.set_event_loop_policy(policy) - - with loop_context(policy.new_event_loop) as _loop: - asyncio.set_event_loop(_loop) - yield _loop - - -@pytest.fixture -def unused_port(aiohttp_unused_port): # type: ignore[no-untyped-def] # pragma: no cover - warnings.warn( - "Deprecated, use aiohttp_unused_port fixture instead", - DeprecationWarning, - stacklevel=2, - ) - return aiohttp_unused_port - - -@pytest.fixture -def aiohttp_unused_port(): # type: ignore[no-untyped-def] - """Return a port that is unused on the current host.""" - return _unused_port - - -@pytest.fixture -def aiohttp_server(loop): # type: ignore[no-untyped-def] - """Factory to create a TestServer instance, given an app. - - aiohttp_server(app, **kwargs) - """ - servers = [] - - async def go(app, *, port=None, **kwargs): # type: ignore[no-untyped-def] - server = TestServer(app, port=port) - await server.start_server(loop=loop, **kwargs) - servers.append(server) - return server - - yield go - - async def finalize() -> None: - while servers: - await servers.pop().close() - - loop.run_until_complete(finalize()) - - -@pytest.fixture -def test_server(aiohttp_server): # type: ignore[no-untyped-def] # pragma: no cover - warnings.warn( - "Deprecated, use aiohttp_server fixture instead", - DeprecationWarning, - stacklevel=2, - ) - return aiohttp_server - - -@pytest.fixture -def aiohttp_raw_server(loop): # type: ignore[no-untyped-def] - """Factory to create a RawTestServer instance, given a web handler. - - aiohttp_raw_server(handler, **kwargs) - """ - servers = [] - - async def go(handler, *, port=None, **kwargs): # type: ignore[no-untyped-def] - server = RawTestServer(handler, port=port) - await server.start_server(loop=loop, **kwargs) - servers.append(server) - return server - - yield go - - async def finalize() -> None: - while servers: - await servers.pop().close() - - loop.run_until_complete(finalize()) - - -@pytest.fixture -def raw_test_server( # type: ignore[no-untyped-def] # pragma: no cover - aiohttp_raw_server, -): - warnings.warn( - "Deprecated, use aiohttp_raw_server fixture instead", - DeprecationWarning, - stacklevel=2, - ) - return aiohttp_raw_server - - -@pytest.fixture -def aiohttp_client( - loop: asyncio.AbstractEventLoop, -) -> Generator[AiohttpClient, None, None]: - """Factory to create a TestClient instance. 
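- The factory accepts an Application, a BaseTestServer, or a callable that
- returns an Application; it starts the resulting TestClient and closes it
- again on teardown.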
- - aiohttp_client(app, **kwargs) - aiohttp_client(server, **kwargs) - aiohttp_client(raw_server, **kwargs) - """ - clients = [] - - async def go( - __param: Union[Application, BaseTestServer], - *args: Any, - server_kwargs: Optional[Dict[str, Any]] = None, - **kwargs: Any - ) -> TestClient: - - if isinstance(__param, Callable) and not isinstance( # type: ignore[arg-type] - __param, (Application, BaseTestServer) - ): - __param = __param(loop, *args, **kwargs) - kwargs = {} - else: - assert not args, "args should be empty" - - if isinstance(__param, Application): - server_kwargs = server_kwargs or {} - server = TestServer(__param, loop=loop, **server_kwargs) - client = TestClient(server, loop=loop, **kwargs) - elif isinstance(__param, BaseTestServer): - client = TestClient(__param, loop=loop, **kwargs) - else: - raise ValueError("Unknown argument type: %r" % type(__param)) - - await client.start_server() - clients.append(client) - return client - - yield go - - async def finalize() -> None: - while clients: - await clients.pop().close() - - loop.run_until_complete(finalize()) - - -@pytest.fixture -def test_client(aiohttp_client): # type: ignore[no-untyped-def] # pragma: no cover - warnings.warn( - "Deprecated, use aiohttp_client fixture instead", - DeprecationWarning, - stacklevel=2, - ) - return aiohttp_client diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/T_T_F_A_.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/T_T_F_A_.py deleted file mode 100644 index e3cf2db2d744cdda880ec1255808f60bc3795c61..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/T_T_F_A_.py +++ /dev/null @@ -1,5 +0,0 @@ -from . import asciiTable - - -class table_T_T_F_A_(asciiTable.asciiTable): - pass diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cyuv.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cyuv.c deleted file mode 100644 index 0765f41ca3021a7fd710d904b429a2374ac3a433..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cyuv.c +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Creative YUV (CYUV) Video Decoder - * by Mike Melanson (melanson@pcisys.net) - * based on "Creative YUV (CYUV) stream format for AVI": - * http://www.csse.monash.edu.au/~timf/videocodec/cyuv.txt - * - * Copyright (C) 2003 The FFmpeg project - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Creative YUV (CYUV) Video Decoder. 
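- *
- * In the delta-coded (YUV411) case the bitstream starts with three 16-byte
- * signed prediction-error tables (Y, U, V), followed by 3 bytes per group of
- * 4 output pixels on each line; a raw UYVY422 frame layout is also accepted.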
- */ - -#include "config_components.h" - -#include - -#include "avcodec.h" -#include "codec_internal.h" -#include "decode.h" -#include "libavutil/internal.h" - -static av_cold int cyuv_decode_init(AVCodecContext *avctx) -{ - /* width needs to be divisible by 4 for this codec to work */ - if (avctx->width & 0x3) - return AVERROR_INVALIDDATA; - - return 0; -} - -static int cyuv_decode_frame(AVCodecContext *avctx, AVFrame *frame, - int *got_frame, AVPacket *avpkt) -{ - const uint8_t *buf = avpkt->data; - int buf_size = avpkt->size; - - unsigned char *y_plane; - unsigned char *u_plane; - unsigned char *v_plane; - int y_ptr; - int u_ptr; - int v_ptr; - - /* prediction error tables (make it clear that they are signed values) */ - const signed char *y_table = (const signed char*)buf + 0; - const signed char *u_table = (const signed char*)buf + 16; - const signed char *v_table = (const signed char*)buf + 32; - - unsigned char y_pred, u_pred, v_pred; - int stream_ptr; - unsigned char cur_byte; - int pixel_groups; - int rawsize = avctx->height * FFALIGN(avctx->width,2) * 2; - int ret; - - if (avctx->codec_id == AV_CODEC_ID_AURA) { - y_table = u_table; - u_table = v_table; - } - /* sanity check the buffer size: A buffer has 3x16-bytes tables - * followed by (height) lines each with 3 bytes to represent groups - * of 4 pixels. Thus, the total size of the buffer ought to be: - * (3 * 16) + height * (width * 3 / 4) */ - if (buf_size == 48 + avctx->height * (avctx->width * 3 / 4)) { - avctx->pix_fmt = AV_PIX_FMT_YUV411P; - } else if(buf_size == rawsize ) { - avctx->pix_fmt = AV_PIX_FMT_UYVY422; - } else { - av_log(avctx, AV_LOG_ERROR, "got a buffer with %d bytes when %d were expected\n", - buf_size, 48 + avctx->height * (avctx->width * 3 / 4)); - return AVERROR_INVALIDDATA; - } - - /* pixel data starts 48 bytes in, after 3x16-byte tables */ - stream_ptr = 48; - - if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) - return ret; - - y_plane = frame->data[0]; - u_plane = frame->data[1]; - v_plane = frame->data[2]; - - if (buf_size == rawsize) { - int linesize = FFALIGN(avctx->width, 2) * 2; - y_plane += frame->linesize[0] * avctx->height; - for (stream_ptr = 0; stream_ptr < rawsize; stream_ptr += linesize) { - y_plane -= frame->linesize[0]; - memcpy(y_plane, buf+stream_ptr, linesize); - } - } else { - - /* iterate through each line in the height */ - for (y_ptr = 0, u_ptr = 0, v_ptr = 0; - y_ptr < (avctx->height * frame->linesize[0]); - y_ptr += frame->linesize[0] - avctx->width, - u_ptr += frame->linesize[1] - avctx->width / 4, - v_ptr += frame->linesize[2] - avctx->width / 4) { - - /* reset predictors */ - cur_byte = buf[stream_ptr++]; - u_plane[u_ptr++] = u_pred = cur_byte & 0xF0; - y_plane[y_ptr++] = y_pred = (cur_byte & 0x0F) << 4; - - cur_byte = buf[stream_ptr++]; - v_plane[v_ptr++] = v_pred = cur_byte & 0xF0; - y_pred += y_table[cur_byte & 0x0F]; - y_plane[y_ptr++] = y_pred; - - cur_byte = buf[stream_ptr++]; - y_pred += y_table[cur_byte & 0x0F]; - y_plane[y_ptr++] = y_pred; - y_pred += y_table[(cur_byte & 0xF0) >> 4]; - y_plane[y_ptr++] = y_pred; - - /* iterate through the remaining pixel groups (4 pixels/group) */ - pixel_groups = avctx->width / 4 - 1; - while (pixel_groups--) { - - cur_byte = buf[stream_ptr++]; - u_pred += u_table[(cur_byte & 0xF0) >> 4]; - u_plane[u_ptr++] = u_pred; - y_pred += y_table[cur_byte & 0x0F]; - y_plane[y_ptr++] = y_pred; - - cur_byte = buf[stream_ptr++]; - v_pred += v_table[(cur_byte & 0xF0) >> 4]; - v_plane[v_ptr++] = v_pred; - y_pred += y_table[cur_byte & 0x0F]; - 
y_plane[y_ptr++] = y_pred; - - cur_byte = buf[stream_ptr++]; - y_pred += y_table[cur_byte & 0x0F]; - y_plane[y_ptr++] = y_pred; - y_pred += y_table[(cur_byte & 0xF0) >> 4]; - y_plane[y_ptr++] = y_pred; - - } - } - } - - *got_frame = 1; - - return buf_size; -} - -#if CONFIG_AURA_DECODER -const FFCodec ff_aura_decoder = { - .p.name = "aura", - CODEC_LONG_NAME("Auravision AURA"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_AURA, - .init = cyuv_decode_init, - FF_CODEC_DECODE_CB(cyuv_decode_frame), - .p.capabilities = AV_CODEC_CAP_DR1, -}; -#endif - -#if CONFIG_CYUV_DECODER -const FFCodec ff_cyuv_decoder = { - .p.name = "cyuv", - CODEC_LONG_NAME("Creative YUV (CYUV)"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_CYUV, - .init = cyuv_decode_init, - FF_CODEC_DECODE_CB(cyuv_decode_frame), - .p.capabilities = AV_CODEC_CAP_DR1, -}; -#endif diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dovi_rpu.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dovi_rpu.c deleted file mode 100644 index dd3893655214d04a424ab5fe78d2f3081eaeb20c..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dovi_rpu.c +++ /dev/null @@ -1,449 +0,0 @@ -/* - * Dolby Vision RPU decoder - * - * Copyright (C) 2021 Jan Ekström - * Copyright (C) 2021 Niklas Haas - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/buffer.h" - -#include "dovi_rpu.h" -#include "golomb.h" -#include "get_bits.h" - -enum { - RPU_COEFF_FIXED = 0, - RPU_COEFF_FLOAT = 1, -}; - -/** - * Private contents of vdr_ref. 
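- *
- * Each entry caches the data mapping (and color metadata) parsed from an RPU,
- * so that a later RPU can re-use it by ID instead of re-transmitting it.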
- */ -typedef struct DOVIVdrRef { - AVDOVIDataMapping mapping; - AVDOVIColorMetadata color; -} DOVIVdrRef; - -void ff_dovi_ctx_unref(DOVIContext *s) -{ - for (int i = 0; i < FF_ARRAY_ELEMS(s->vdr_ref); i++) - av_buffer_unref(&s->vdr_ref[i]); - - *s = (DOVIContext) { - .logctx = s->logctx, - }; -} - -void ff_dovi_ctx_flush(DOVIContext *s) -{ - for (int i = 0; i < FF_ARRAY_ELEMS(s->vdr_ref); i++) - av_buffer_unref(&s->vdr_ref[i]); - - *s = (DOVIContext) { - .logctx = s->logctx, - .dv_profile = s->dv_profile, - }; -} - -int ff_dovi_ctx_replace(DOVIContext *s, const DOVIContext *s0) -{ - int ret; - s->logctx = s0->logctx; - s->mapping = s0->mapping; - s->color = s0->color; - s->dv_profile = s0->dv_profile; - for (int i = 0; i < DOVI_MAX_DM_ID; i++) { - if ((ret = av_buffer_replace(&s->vdr_ref[i], s0->vdr_ref[i])) < 0) - goto fail; - } - - return 0; - -fail: - ff_dovi_ctx_unref(s); - return ret; -} - -void ff_dovi_update_cfg(DOVIContext *s, const AVDOVIDecoderConfigurationRecord *cfg) -{ - if (!cfg) - return; - - s->dv_profile = cfg->dv_profile; -} - -int ff_dovi_attach_side_data(DOVIContext *s, AVFrame *frame) -{ - AVFrameSideData *sd; - AVBufferRef *buf; - AVDOVIMetadata *dovi; - size_t dovi_size; - - if (!s->mapping || !s->color) - return 0; /* incomplete dovi metadata */ - - dovi = av_dovi_metadata_alloc(&dovi_size); - if (!dovi) - return AVERROR(ENOMEM); - - buf = av_buffer_create((uint8_t *) dovi, dovi_size, NULL, NULL, 0); - if (!buf) { - av_free(dovi); - return AVERROR(ENOMEM); - } - - sd = av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_DOVI_METADATA, buf); - if (!sd) { - av_buffer_unref(&buf); - return AVERROR(ENOMEM); - } - - /* Copy only the parts of these structs known to us at compiler-time. */ -#define COPY(t, a, b, last) memcpy(a, b, offsetof(t, last) + sizeof((b)->last)) - COPY(AVDOVIRpuDataHeader, av_dovi_get_header(dovi), &s->header, disable_residual_flag); - COPY(AVDOVIDataMapping, av_dovi_get_mapping(dovi), s->mapping, nlq[2].linear_deadzone_threshold); - COPY(AVDOVIColorMetadata, av_dovi_get_color(dovi), s->color, source_diagonal); - return 0; -} - -static int guess_profile(const AVDOVIRpuDataHeader *hdr) -{ - switch (hdr->vdr_rpu_profile) { - case 0: - if (hdr->bl_video_full_range_flag) - return 5; - break; - case 1: - if (hdr->el_spatial_resampling_filter_flag && !hdr->disable_residual_flag) { - if (hdr->vdr_bit_depth == 12) { - return 7; - } else { - return 4; - } - } else { - return 8; - } - } - - return 0; /* unknown */ -} - -static inline uint64_t get_ue_coef(GetBitContext *gb, const AVDOVIRpuDataHeader *hdr) -{ - uint64_t ipart; - union { uint32_t u32; float f32; } fpart; - - switch (hdr->coef_data_type) { - case RPU_COEFF_FIXED: - ipart = get_ue_golomb_long(gb); - fpart.u32 = get_bits_long(gb, hdr->coef_log2_denom); - return (ipart << hdr->coef_log2_denom) + fpart.u32; - - case RPU_COEFF_FLOAT: - fpart.u32 = get_bits_long(gb, 32); - return fpart.f32 * (1 << hdr->coef_log2_denom); - } - - return 0; /* unreachable */ -} - -static inline int64_t get_se_coef(GetBitContext *gb, const AVDOVIRpuDataHeader *hdr) -{ - int64_t ipart; - union { uint32_t u32; float f32; } fpart; - - switch (hdr->coef_data_type) { - case RPU_COEFF_FIXED: - ipart = get_se_golomb_long(gb); - fpart.u32 = get_bits_long(gb, hdr->coef_log2_denom); - return ipart * (1LL << hdr->coef_log2_denom) + fpart.u32; - - case RPU_COEFF_FLOAT: - fpart.u32 = get_bits_long(gb, 32); - return fpart.f32 * (1 << hdr->coef_log2_denom); - } - - return 0; /* unreachable */ -} - -#define VALIDATE(VAR, MIN, MAX) \ - 
do { \ - if (VAR < MIN || VAR > MAX) { \ - av_log(s->logctx, AV_LOG_ERROR, "RPU validation failed: " \ - #MIN" <= "#VAR" = %d <= "#MAX"\n", (int) VAR); \ - goto fail; \ - } \ - } while (0) - -int ff_dovi_rpu_parse(DOVIContext *s, const uint8_t *rpu, size_t rpu_size) -{ - AVDOVIRpuDataHeader *hdr = &s->header; - GetBitContext *gb = &(GetBitContext){0}; - DOVIVdrRef *vdr; - int ret; - - uint8_t nal_prefix; - uint8_t rpu_type; - uint8_t vdr_seq_info_present; - uint8_t vdr_dm_metadata_present; - uint8_t use_prev_vdr_rpu; - uint8_t use_nlq; - uint8_t profile; - if ((ret = init_get_bits8(gb, rpu, rpu_size)) < 0) - return ret; - - /* RPU header, common values */ - nal_prefix = get_bits(gb, 8); - VALIDATE(nal_prefix, 25, 25); - rpu_type = get_bits(gb, 6); - if (rpu_type != 2) { - av_log(s->logctx, AV_LOG_WARNING, "Unrecognized RPU type " - "%"PRIu8", ignoring\n", rpu_type); - return 0; - } - - hdr->rpu_type = rpu_type; - hdr->rpu_format = get_bits(gb, 11); - - /* Values specific to RPU type 2 */ - hdr->vdr_rpu_profile = get_bits(gb, 4); - hdr->vdr_rpu_level = get_bits(gb, 4); - - vdr_seq_info_present = get_bits1(gb); - if (vdr_seq_info_present) { - hdr->chroma_resampling_explicit_filter_flag = get_bits1(gb); - hdr->coef_data_type = get_bits(gb, 2); - VALIDATE(hdr->coef_data_type, RPU_COEFF_FIXED, RPU_COEFF_FLOAT); - switch (hdr->coef_data_type) { - case RPU_COEFF_FIXED: - hdr->coef_log2_denom = get_ue_golomb(gb); - VALIDATE(hdr->coef_log2_denom, 13, 32); - break; - case RPU_COEFF_FLOAT: - hdr->coef_log2_denom = 32; /* arbitrary, choose maximum precision */ - break; - } - - hdr->vdr_rpu_normalized_idc = get_bits(gb, 2); - hdr->bl_video_full_range_flag = get_bits1(gb); - - if ((hdr->rpu_format & 0x700) == 0) { - int bl_bit_depth_minus8 = get_ue_golomb_31(gb); - int el_bit_depth_minus8 = get_ue_golomb_31(gb); - int vdr_bit_depth_minus8 = get_ue_golomb_31(gb); - VALIDATE(bl_bit_depth_minus8, 0, 8); - VALIDATE(el_bit_depth_minus8, 0, 8); - VALIDATE(vdr_bit_depth_minus8, 0, 8); - hdr->bl_bit_depth = bl_bit_depth_minus8 + 8; - hdr->el_bit_depth = el_bit_depth_minus8 + 8; - hdr->vdr_bit_depth = vdr_bit_depth_minus8 + 8; - hdr->spatial_resampling_filter_flag = get_bits1(gb); - skip_bits(gb, 3); /* reserved_zero_3bits */ - hdr->el_spatial_resampling_filter_flag = get_bits1(gb); - hdr->disable_residual_flag = get_bits1(gb); - } - } - - if (!hdr->bl_bit_depth) { - av_log(s->logctx, AV_LOG_ERROR, "Missing RPU VDR sequence info?\n"); - goto fail; - } - - vdr_dm_metadata_present = get_bits1(gb); - use_prev_vdr_rpu = get_bits1(gb); - use_nlq = (hdr->rpu_format & 0x700) == 0 && !hdr->disable_residual_flag; - - profile = s->dv_profile ? 
s->dv_profile : guess_profile(hdr); - if (profile == 5 && use_nlq) { - av_log(s->logctx, AV_LOG_ERROR, "Profile 5 RPUs should not use NLQ\n"); - goto fail; - } - - if (use_prev_vdr_rpu) { - int prev_vdr_rpu_id = get_ue_golomb_31(gb); - VALIDATE(prev_vdr_rpu_id, 0, DOVI_MAX_DM_ID); - if (!s->vdr_ref[prev_vdr_rpu_id]) { - av_log(s->logctx, AV_LOG_ERROR, "Unknown previous RPU ID: %u\n", - prev_vdr_rpu_id); - goto fail; - } - vdr = (DOVIVdrRef *) s->vdr_ref[prev_vdr_rpu_id]->data; - s->mapping = &vdr->mapping; - } else { - int vdr_rpu_id = get_ue_golomb_31(gb); - VALIDATE(vdr_rpu_id, 0, DOVI_MAX_DM_ID); - if (!s->vdr_ref[vdr_rpu_id]) { - s->vdr_ref[vdr_rpu_id] = av_buffer_allocz(sizeof(DOVIVdrRef)); - if (!s->vdr_ref[vdr_rpu_id]) - return AVERROR(ENOMEM); - } - - vdr = (DOVIVdrRef *) s->vdr_ref[vdr_rpu_id]->data; - s->mapping = &vdr->mapping; - - vdr->mapping.vdr_rpu_id = vdr_rpu_id; - vdr->mapping.mapping_color_space = get_ue_golomb_31(gb); - vdr->mapping.mapping_chroma_format_idc = get_ue_golomb_31(gb); - - for (int c = 0; c < 3; c++) { - AVDOVIReshapingCurve *curve = &vdr->mapping.curves[c]; - int num_pivots_minus_2 = get_ue_golomb_31(gb); - int pivot = 0; - - VALIDATE(num_pivots_minus_2, 0, AV_DOVI_MAX_PIECES - 1); - curve->num_pivots = num_pivots_minus_2 + 2; - for (int i = 0; i < curve->num_pivots; i++) { - pivot += get_bits(gb, hdr->bl_bit_depth); - curve->pivots[i] = av_clip_uint16(pivot); - } - } - - if (use_nlq) { - vdr->mapping.nlq_method_idc = get_bits(gb, 3); - /** - * The patent mentions another legal value, NLQ_MU_LAW, but it's - * not documented anywhere how to parse or apply that type of NLQ. - */ - VALIDATE(vdr->mapping.nlq_method_idc, 0, AV_DOVI_NLQ_LINEAR_DZ); - } else { - vdr->mapping.nlq_method_idc = AV_DOVI_NLQ_NONE; - } - - vdr->mapping.num_x_partitions = get_ue_golomb_long(gb) + 1; - vdr->mapping.num_y_partitions = get_ue_golomb_long(gb) + 1; - /* End of rpu_data_header(), start of vdr_rpu_data_payload() */ - - for (int c = 0; c < 3; c++) { - AVDOVIReshapingCurve *curve = &vdr->mapping.curves[c]; - for (int i = 0; i < curve->num_pivots - 1; i++) { - int mapping_idc = get_ue_golomb_31(gb); - VALIDATE(mapping_idc, 0, 1); - curve->mapping_idc[i] = mapping_idc; - switch (mapping_idc) { - case AV_DOVI_MAPPING_POLYNOMIAL: { - int poly_order_minus1 = get_ue_golomb_31(gb); - VALIDATE(poly_order_minus1, 0, 1); - curve->poly_order[i] = poly_order_minus1 + 1; - if (poly_order_minus1 == 0) { - int linear_interp_flag = get_bits1(gb); - if (linear_interp_flag) { - /* lack of documentation/samples */ - avpriv_request_sample(s->logctx, "Dolby Vision " - "linear interpolation"); - ff_dovi_ctx_unref(s); - return AVERROR_PATCHWELCOME; - } - } - for (int k = 0; k <= curve->poly_order[i]; k++) - curve->poly_coef[i][k] = get_se_coef(gb, hdr); - break; - } - case AV_DOVI_MAPPING_MMR: { - int mmr_order_minus1 = get_bits(gb, 2); - VALIDATE(mmr_order_minus1, 0, 2); - curve->mmr_order[i] = mmr_order_minus1 + 1; - curve->mmr_constant[i] = get_se_coef(gb, hdr); - for (int j = 0; j < curve->mmr_order[i]; j++) { - for (int k = 0; k < 7; k++) - curve->mmr_coef[i][j][k] = get_se_coef(gb, hdr); - } - break; - } - } - } - } - - if (use_nlq) { - for (int c = 0; c < 3; c++) { - AVDOVINLQParams *nlq = &vdr->mapping.nlq[c]; - nlq->nlq_offset = get_bits(gb, hdr->el_bit_depth); - nlq->vdr_in_max = get_ue_coef(gb, hdr); - switch (vdr->mapping.nlq_method_idc) { - case AV_DOVI_NLQ_LINEAR_DZ: - nlq->linear_deadzone_slope = get_ue_coef(gb, hdr); - nlq->linear_deadzone_threshold = get_ue_coef(gb, hdr); - break; - } 
- } - } - } - - if (vdr_dm_metadata_present) { - AVDOVIColorMetadata *color; - int affected_dm_id = get_ue_golomb_31(gb); - int current_dm_id = get_ue_golomb_31(gb); - VALIDATE(affected_dm_id, 0, DOVI_MAX_DM_ID); - VALIDATE(current_dm_id, 0, DOVI_MAX_DM_ID); - if (!s->vdr_ref[affected_dm_id]) { - s->vdr_ref[affected_dm_id] = av_buffer_allocz(sizeof(DOVIVdrRef)); - if (!s->vdr_ref[affected_dm_id]) - return AVERROR(ENOMEM); - } - - if (!s->vdr_ref[current_dm_id]) { - av_log(s->logctx, AV_LOG_ERROR, "Unknown previous RPU DM ID: %u\n", - current_dm_id); - goto fail; - } - - /* Update current pointer based on current_dm_id */ - vdr = (DOVIVdrRef *) s->vdr_ref[current_dm_id]->data; - s->color = &vdr->color; - - /* Update values of affected_dm_id */ - vdr = (DOVIVdrRef *) s->vdr_ref[affected_dm_id]->data; - color = &vdr->color; - color->dm_metadata_id = affected_dm_id; - color->scene_refresh_flag = get_ue_golomb_31(gb); - for (int i = 0; i < 9; i++) - color->ycc_to_rgb_matrix[i] = av_make_q(get_sbits(gb, 16), 1 << 13); - for (int i = 0; i < 3; i++) { - int denom = profile == 4 ? (1 << 30) : (1 << 28); - unsigned offset = get_bits_long(gb, 32); - if (offset > INT_MAX) { - /* Ensure the result fits inside AVRational */ - offset >>= 1; - denom >>= 1; - } - color->ycc_to_rgb_offset[i] = av_make_q(offset, denom); - } - for (int i = 0; i < 9; i++) - color->rgb_to_lms_matrix[i] = av_make_q(get_sbits(gb, 16), 1 << 14); - - color->signal_eotf = get_bits(gb, 16); - color->signal_eotf_param0 = get_bits(gb, 16); - color->signal_eotf_param1 = get_bits(gb, 16); - color->signal_eotf_param2 = get_bits_long(gb, 32); - color->signal_bit_depth = get_bits(gb, 5); - VALIDATE(color->signal_bit_depth, 8, 16); - color->signal_color_space = get_bits(gb, 2); - color->signal_chroma_format = get_bits(gb, 2); - color->signal_full_range_flag = get_bits(gb, 2); - color->source_min_pq = get_bits(gb, 12); - color->source_max_pq = get_bits(gb, 12); - color->source_diagonal = get_bits(gb, 10); - } - - /* FIXME: verify CRC32, requires implementation of AV_CRC_32_MPEG_2 */ - return 0; - -fail: - ff_dovi_ctx_unref(s); /* don't leak potentially invalid state */ - return AVERROR(EINVAL); -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dpcm.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dpcm.c deleted file mode 100644 index 6ea9e2c06500f73b790fbeaaaf99ee0938ba7aab..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dpcm.c +++ /dev/null @@ -1,488 +0,0 @@ -/* - * Assorted DPCM codecs - * Copyright (c) 2003 The FFmpeg project - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Assorted DPCM (differential pulse code modulation) audio codecs - * by Mike Melanson (melanson@pcisys.net) - * Xan DPCM decoder by Mario Brito (mbrito@student.dei.uc.pt) - * for more information on the specific data formats, visit: - * http://www.pcisys.net/~melanson/codecs/simpleaudio.html - * SOL DPCMs implemented by Konstantin Shishkov - * - * Note about using the Xan DPCM decoder: Xan DPCM is used in AVI files - * found in the Wing Commander IV computer game. These AVI files contain - * WAVEFORMAT headers which report the audio format as 0x01: raw PCM. - * Clearly incorrect. To detect Xan DPCM, you will probably have to - * special-case your AVI demuxer to use Xan DPCM if the file uses 'Xxan' - * (Xan video) for its video codec. Alternately, such AVI files also contain - * the fourcc 'Axan' in the 'auds' chunk of the AVI header. - */ - -#include "avcodec.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "decode.h" -#include "mathops.h" - -typedef struct DPCMContext { - int16_t array[256]; - int sample[2]; ///< previous sample (for SOL_DPCM and WADY_DPCM) - int scale; ///< scale for WADY_DPCM - const int8_t *sol_table; ///< delta table for SOL_DPCM -} DPCMContext; - -static const int32_t derf_steps[96] = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 16, - 17, 19, 21, 23, 25, 28, 31, 34, - 37, 41, 45, 50, 55, 60, 66, 73, - 80, 88, 97, 107, 118, 130, 143, 157, - 173, 190, 209, 230, 253, 279, 307, 337, - 371, 408, 449, 494, 544, 598, 658, 724, - 796, 876, 963, 1060, 1166, 1282, 1411, 1552, - 1707, 1878, 2066, 2272, 2499, 2749, 3024, 3327, - 3660, 4026, 4428, 4871, 5358, 5894, 6484, 7132, - 7845, 8630, 9493, 10442, 11487, 12635, 13899, 15289, - 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767, -}; - -static const int16_t interplay_delta_table[] = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, - 40, 41, 42, 43, 47, 51, 56, 61, - 66, 72, 79, 86, 94, 102, 112, 122, - 133, 145, 158, 173, 189, 206, 225, 245, - 267, 292, 318, 348, 379, 414, 452, 493, - 538, 587, 640, 699, 763, 832, 908, 991, - 1081, 1180, 1288, 1405, 1534, 1673, 1826, 1993, - 2175, 2373, 2590, 2826, 3084, 3365, 3672, 4008, - 4373, 4772, 5208, 5683, 6202, 6767, 7385, 8059, - 8794, 9597, 10472, 11428, 12471, 13609, 14851, 16206, - 17685, 19298, 21060, 22981, 25078, 27367, 29864, 32589, - -29973, -26728, -23186, -19322, -15105, -10503, -5481, -1, - 1, 1, 5481, 10503, 15105, 19322, 23186, 26728, - 29973, -32589, -29864, -27367, -25078, -22981, -21060, -19298, - -17685, -16206, -14851, -13609, -12471, -11428, -10472, -9597, - -8794, -8059, -7385, -6767, -6202, -5683, -5208, -4772, - -4373, -4008, -3672, -3365, -3084, -2826, -2590, -2373, - -2175, -1993, -1826, -1673, -1534, -1405, -1288, -1180, - -1081, -991, -908, -832, -763, -699, -640, -587, - -538, -493, -452, -414, -379, -348, -318, -292, - -267, -245, -225, -206, -189, -173, -158, -145, - -133, -122, -112, -102, -94, -86, -79, -72, - -66, -61, -56, -51, -47, -43, -42, -41, - -40, -39, -38, -37, -36, -35, -34, -33, - -32, -31, -30, -29, -28, -27, -26, -25, - -24, -23, -22, -21, -20, -19, -18, -17, - -16, -15, -14, -13, -12, -11, -10, -9, - -8, -7, -6, -5, -4, -3, -2, -1 - -}; - -static const 
int8_t sol_table_old[16] = { - 0x0, 0x1, 0x2, 0x3, 0x6, 0xA, 0xF, 0x15, - -0x15, -0xF, -0xA, -0x6, -0x3, -0x2, -0x1, 0x0 -}; - -static const int8_t sol_table_new[16] = { - 0x0, 0x1, 0x2, 0x3, 0x6, 0xA, 0xF, 0x15, - 0x0, -0x1, -0x2, -0x3, -0x6, -0xA, -0xF, -0x15 -}; - -static const int16_t sol_table_16[128] = { - 0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080, - 0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120, - 0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0, - 0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230, - 0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280, - 0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0, - 0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320, - 0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370, - 0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0, - 0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480, - 0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700, - 0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00, - 0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000 -}; - -static const int16_t wady_table[128] = { - 0, 2, 4, 6, 8, 10, 12, 15, - 18, 21, 24, 28, 32, 36, 40, 44, - 49, 54, 59, 64, 70, 76, 82, 88, - 95, 102, 109, 116, 124, 132, 140, 148, - 160, 170, 180, 190, 200, 210, 220, 230, - 240, 255, 270, 285, 300, 320, 340, 360, - 380, 400, 425, 450, 475, 500, 525, 550, - 580, 610, 650, 700, 750, 800, 900, 1000, - -0, -2, -4, -6, -8, -10, -12, -15, - -18, -21, -24, -28, -32, -36, -40, -44, - -49, -54, -59, -64, -70, -76, -82, -88, - -95, -102,-109,-116,-124,-132,-140,-148, - -160,-170,-180,-190,-200,-210,-220,-230, - -240,-255,-270,-285,-300,-320,-340,-360, - -380,-400,-425,-450,-475,-500,-525,-550, - -580,-610,-650,-700,-750,-800,-900,-1000, -}; - -static av_cold int dpcm_decode_init(AVCodecContext *avctx) -{ - DPCMContext *s = avctx->priv_data; - int i; - - if (avctx->ch_layout.nb_channels < 1 || avctx->ch_layout.nb_channels > 2) { - av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n"); - return AVERROR(EINVAL); - } - - s->sample[0] = s->sample[1] = 0; - - switch (avctx->codec->id) { - - case AV_CODEC_ID_ROQ_DPCM: - /* initialize square table */ - for (i = 0; i < 128; i++) { - int16_t square = i * i; - s->array[i ] = square; - s->array[i + 128] = -square; - } - break; - - case AV_CODEC_ID_SOL_DPCM: - switch(avctx->codec_tag){ - case 1: - s->sol_table = sol_table_old; - s->sample[0] = s->sample[1] = 0x80; - break; - case 2: - s->sol_table = sol_table_new; - s->sample[0] = s->sample[1] = 0x80; - break; - case 3: - break; - default: - av_log(avctx, AV_LOG_ERROR, "Unknown SOL subcodec\n"); - return -1; - } - break; - - case AV_CODEC_ID_SDX2_DPCM: - for (i = -128; i < 128; i++) { - int16_t square = i * i * 2; - s->array[i+128] = i < 0 ? -square: square; - } - break; - - case AV_CODEC_ID_CBD2_DPCM: - for (i = -128; i < 128; i++) { - int16_t cube = (i * i * i) / 64; - s->array[i+128] = cube; - } - break; - - case AV_CODEC_ID_GREMLIN_DPCM: { - int delta = 0; - int code = 64; - int step = 45; - - s->array[0] = 0; - for (i = 0; i < 127; i++) { - delta += (code >> 5); - code += step; - step += 2; - - s->array[i*2 + 1] = delta; - s->array[i*2 + 2] = -delta; - } - s->array[255] = delta + (code >> 5); - } - break; - - case AV_CODEC_ID_WADY_DPCM: - s->scale = (avctx->extradata && avctx->extradata_size > 0) ? 
avctx->extradata[0] : 1; - break; - - default: - break; - } - - if (avctx->codec->id == AV_CODEC_ID_SOL_DPCM && avctx->codec_tag != 3) - avctx->sample_fmt = AV_SAMPLE_FMT_U8; - else - avctx->sample_fmt = AV_SAMPLE_FMT_S16; - - return 0; -} - - -static int dpcm_decode_frame(AVCodecContext *avctx, AVFrame *frame, - int *got_frame_ptr, AVPacket *avpkt) -{ - int buf_size = avpkt->size; - DPCMContext *s = avctx->priv_data; - int out = 0, ret; - int predictor[2]; - int ch = 0; - int stereo = avctx->ch_layout.nb_channels - 1; - int16_t *output_samples, *samples_end; - GetByteContext gb; - - if (stereo && (buf_size & 1)) - buf_size--; - bytestream2_init(&gb, avpkt->data, buf_size); - - /* calculate output size */ - switch(avctx->codec->id) { - case AV_CODEC_ID_ROQ_DPCM: - out = buf_size - 8; - break; - case AV_CODEC_ID_INTERPLAY_DPCM: - out = buf_size - 6 - avctx->ch_layout.nb_channels; - break; - case AV_CODEC_ID_XAN_DPCM: - out = buf_size - 2 * avctx->ch_layout.nb_channels; - break; - case AV_CODEC_ID_SOL_DPCM: - if (avctx->codec_tag != 3) - out = buf_size * 2; - else - out = buf_size; - break; - case AV_CODEC_ID_WADY_DPCM: - case AV_CODEC_ID_DERF_DPCM: - case AV_CODEC_ID_GREMLIN_DPCM: - case AV_CODEC_ID_CBD2_DPCM: - case AV_CODEC_ID_SDX2_DPCM: - out = buf_size; - break; - } - if (out <= 0) { - av_log(avctx, AV_LOG_ERROR, "packet is too small\n"); - return AVERROR(EINVAL); - } - if (out % avctx->ch_layout.nb_channels) { - av_log(avctx, AV_LOG_WARNING, "channels have differing number of samples\n"); - } - - /* get output buffer */ - frame->nb_samples = (out + avctx->ch_layout.nb_channels - 1) / avctx->ch_layout.nb_channels; - if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) - return ret; - output_samples = (int16_t *)frame->data[0]; - samples_end = output_samples + out; - - switch(avctx->codec->id) { - - case AV_CODEC_ID_ROQ_DPCM: - bytestream2_skipu(&gb, 6); - - if (stereo) { - predictor[1] = sign_extend(bytestream2_get_byteu(&gb) << 8, 16); - predictor[0] = sign_extend(bytestream2_get_byteu(&gb) << 8, 16); - } else { - predictor[0] = sign_extend(bytestream2_get_le16u(&gb), 16); - } - - /* decode the samples */ - while (output_samples < samples_end) { - predictor[ch] += s->array[bytestream2_get_byteu(&gb)]; - predictor[ch] = av_clip_int16(predictor[ch]); - *output_samples++ = predictor[ch]; - - /* toggle channel */ - ch ^= stereo; - } - break; - - case AV_CODEC_ID_INTERPLAY_DPCM: - bytestream2_skipu(&gb, 6); /* skip over the stream mask and stream length */ - - for (ch = 0; ch < avctx->ch_layout.nb_channels; ch++) { - predictor[ch] = sign_extend(bytestream2_get_le16u(&gb), 16); - *output_samples++ = predictor[ch]; - } - - ch = 0; - while (output_samples < samples_end) { - predictor[ch] += interplay_delta_table[bytestream2_get_byteu(&gb)]; - predictor[ch] = av_clip_int16(predictor[ch]); - *output_samples++ = predictor[ch]; - - /* toggle channel */ - ch ^= stereo; - } - break; - - case AV_CODEC_ID_XAN_DPCM: - { - int shift[2] = { 4, 4 }; - - for (ch = 0; ch < avctx->ch_layout.nb_channels; ch++) - predictor[ch] = sign_extend(bytestream2_get_le16u(&gb), 16); - - ch = 0; - while (output_samples < samples_end) { - int diff = bytestream2_get_byteu(&gb); - int n = diff & 3; - - if (n == 3) - shift[ch]++; - else - shift[ch] -= (2 * n); - diff = sign_extend((diff &~ 3) << 8, 16); - - /* saturate the shifter to 0..31 */ - shift[ch] = av_clip_uintp2(shift[ch], 5); - - diff >>= shift[ch]; - predictor[ch] += diff; - - predictor[ch] = av_clip_int16(predictor[ch]); - *output_samples++ = predictor[ch]; - - /* 
toggle channel */ - ch ^= stereo; - } - break; - } - case AV_CODEC_ID_SOL_DPCM: - if (avctx->codec_tag != 3) { - uint8_t *output_samples_u8 = frame->data[0], - *samples_end_u8 = output_samples_u8 + out; - while (output_samples_u8 < samples_end_u8) { - int n = bytestream2_get_byteu(&gb); - - s->sample[0] += s->sol_table[n >> 4]; - s->sample[0] = av_clip_uint8(s->sample[0]); - *output_samples_u8++ = s->sample[0]; - - s->sample[stereo] += s->sol_table[n & 0x0F]; - s->sample[stereo] = av_clip_uint8(s->sample[stereo]); - *output_samples_u8++ = s->sample[stereo]; - } - } else { - while (output_samples < samples_end) { - int n = bytestream2_get_byteu(&gb); - if (n & 0x80) s->sample[ch] -= sol_table_16[n & 0x7F]; - else s->sample[ch] += sol_table_16[n & 0x7F]; - s->sample[ch] = av_clip_int16(s->sample[ch]); - *output_samples++ = s->sample[ch]; - /* toggle channel */ - ch ^= stereo; - } - } - break; - - case AV_CODEC_ID_CBD2_DPCM: - case AV_CODEC_ID_SDX2_DPCM: - while (output_samples < samples_end) { - int8_t n = bytestream2_get_byteu(&gb); - - if (!(n & 1)) - s->sample[ch] = 0; - s->sample[ch] += s->array[n + 128]; - s->sample[ch] = av_clip_int16(s->sample[ch]); - *output_samples++ = s->sample[ch]; - ch ^= stereo; - } - break; - - case AV_CODEC_ID_GREMLIN_DPCM: { - int idx = 0; - - while (output_samples < samples_end) { - uint8_t n = bytestream2_get_byteu(&gb); - - *output_samples++ = s->sample[idx] += (unsigned)s->array[n]; - idx ^= 1; - } - } - break; - - case AV_CODEC_ID_DERF_DPCM: { - int idx = 0; - - while (output_samples < samples_end) { - uint8_t n = bytestream2_get_byteu(&gb); - int index = FFMIN(n & 0x7f, 95); - - s->sample[idx] += (n & 0x80 ? -1: 1) * derf_steps[index]; - s->sample[idx] = av_clip_int16(s->sample[idx]); - *output_samples++ = s->sample[idx]; - idx ^= stereo; - } - } - break; - - case AV_CODEC_ID_WADY_DPCM: { - int idx = 0; - - while (output_samples < samples_end) { - const uint8_t n = bytestream2_get_byteu(&gb); - - if (n & 0x80) - s->sample[idx] = sign_extend((n & 0x7f) << 9, 16); - else - s->sample[idx] += s->scale * wady_table[n & 0x7f]; - *output_samples++ = av_clip_int16(s->sample[idx]); - idx ^= stereo; - } - } - break; - } - - *got_frame_ptr = 1; - - return avpkt->size; -} - -static void dpcm_flush(AVCodecContext *avctx) -{ - DPCMContext *s = avctx->priv_data; - - s->sample[0] = s->sample[1] = 0; -} - -#define DPCM_DECODER(id_, name_, long_name_) \ -const FFCodec ff_ ## name_ ## _decoder = { \ - .p.name = #name_, \ - CODEC_LONG_NAME(long_name_), \ - .p.type = AVMEDIA_TYPE_AUDIO, \ - .p.id = id_, \ - .p.capabilities = AV_CODEC_CAP_DR1, \ - .priv_data_size = sizeof(DPCMContext), \ - .init = dpcm_decode_init, \ - .flush = dpcm_flush, \ - FF_CODEC_DECODE_CB(dpcm_decode_frame), \ -} - -DPCM_DECODER(AV_CODEC_ID_CBD2_DPCM, cbd2_dpcm, "DPCM Cuberoot-Delta-Exact"); -DPCM_DECODER(AV_CODEC_ID_DERF_DPCM, derf_dpcm, "DPCM Xilam DERF"); -DPCM_DECODER(AV_CODEC_ID_GREMLIN_DPCM, gremlin_dpcm, "DPCM Gremlin"); -DPCM_DECODER(AV_CODEC_ID_INTERPLAY_DPCM, interplay_dpcm, "DPCM Interplay"); -DPCM_DECODER(AV_CODEC_ID_ROQ_DPCM, roq_dpcm, "DPCM id RoQ"); -DPCM_DECODER(AV_CODEC_ID_SDX2_DPCM, sdx2_dpcm, "DPCM Squareroot-Delta-Exact"); -DPCM_DECODER(AV_CODEC_ID_SOL_DPCM, sol_dpcm, "DPCM Sol"); -DPCM_DECODER(AV_CODEC_ID_XAN_DPCM, xan_dpcm, "DPCM Xan"); -DPCM_DECODER(AV_CODEC_ID_WADY_DPCM, wady_dpcm, "DPCM Marble WADY"); diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flashsvenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flashsvenc.c deleted file mode 100644 
index 5cf0602f5dacfbf7d37fb5e7c3451e516e4c31ef..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flashsvenc.c +++ /dev/null @@ -1,260 +0,0 @@ -/* - * Flash Screen Video encoder - * Copyright (C) 2004 Alex Beregszaszi - * Copyright (C) 2006 Benjamin Larsson - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/* Encoding development sponsored by http://fh-campuswien.ac.at */ - -/** - * @file - * Flash Screen Video encoder - * @author Alex Beregszaszi - * @author Benjamin Larsson - * - * A description of the bitstream format for Flash Screen Video version 1/2 - * is part of the SWF File Format Specification (version 10), which can be - * downloaded from http://www.adobe.com/devnet/swf.html. - */ - -/* - * Encoding ideas: A basic encoder would just use a fixed block size. - * Block sizes can be multiples of 16, from 16 to 256. The blocks don't - * have to be quadratic. A brute force search with a set of different - * block sizes should give a better result than to just use a fixed size. - * - * TODO: - * Don't reencode the frame in brute force mode if the frame is a dupe. - * Speed up. Make the difference check faster. - */ - -#include -#include - -#include "libavutil/buffer.h" - -#include "avcodec.h" -#include "codec_internal.h" -#include "encode.h" -#include "put_bits.h" -#include "bytestream.h" - -/* These values are hardcoded for now. 
*/ -#define BLOCK_WIDTH (4 * 16U) -#define BLOCK_HEIGHT (4 * 16U) - -typedef struct FlashSVContext { - AVCodecContext *avctx; - const uint8_t *previous_frame; - AVBufferRef *prev_frame_buf; - int image_width, image_height; - unsigned packet_size; - int64_t last_key_frame; - uint8_t tmpblock[3 * 256 * 256]; -} FlashSVContext; - -static int copy_region_enc(const uint8_t *sptr, uint8_t *dptr, int dx, int dy, - int h, int w, int stride, const uint8_t *pfptr) -{ - int i, j; - int diff = 0; - - for (i = dx + h; i > dx; i--) { - const uint8_t *nsptr = sptr + i * stride + dy * 3; - const uint8_t *npfptr = pfptr + i * stride + dy * 3; - for (j = 0; j < w * 3; j++) { - diff |= npfptr[j] ^ nsptr[j]; - dptr[j] = nsptr[j]; - } - dptr += w * 3; - } - if (diff) - return 1; - return 0; -} - -static av_cold int flashsv_encode_end(AVCodecContext *avctx) -{ - FlashSVContext *s = avctx->priv_data; - - av_buffer_unref(&s->prev_frame_buf); - - return 0; -} - -static av_cold int flashsv_encode_init(AVCodecContext *avctx) -{ - FlashSVContext *s = avctx->priv_data; - int h_blocks, v_blocks, nb_blocks; - - s->avctx = avctx; - - if (avctx->width > 4095 || avctx->height > 4095) { - av_log(avctx, AV_LOG_ERROR, - "Input dimensions too large, input must be max 4095x4095 !\n"); - return AVERROR_INVALIDDATA; - } - - s->last_key_frame = 0; - - s->image_width = avctx->width; - s->image_height = avctx->height; - - h_blocks = (s->image_width + BLOCK_WIDTH - 1) / BLOCK_WIDTH; - v_blocks = (s->image_height + BLOCK_WIDTH - 1) / BLOCK_WIDTH; - nb_blocks = h_blocks * v_blocks; - s->packet_size = 4 + nb_blocks * (2 + 3 * BLOCK_WIDTH * BLOCK_HEIGHT); - - return 0; -} - - -static int encode_bitstream(FlashSVContext *s, const AVFrame *p, uint8_t *buf, - int buf_size, int block_width, int block_height, - const uint8_t *previous_frame, int *I_frame) -{ - - PutBitContext pb; - int h_blocks, v_blocks, h_part, v_part, i, j; - int buf_pos, res; - int pred_blocks = 0; - - init_put_bits(&pb, buf, buf_size); - - put_bits(&pb, 4, block_width / 16 - 1); - put_bits(&pb, 12, s->image_width); - put_bits(&pb, 4, block_height / 16 - 1); - put_bits(&pb, 12, s->image_height); - flush_put_bits(&pb); - buf_pos = 4; - - h_blocks = s->image_width / block_width; - h_part = s->image_width % block_width; - v_blocks = s->image_height / block_height; - v_part = s->image_height % block_height; - - /* loop over all block columns */ - for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) { - - int y_pos = j * block_height; // vertical position in frame - int cur_blk_height = (j < v_blocks) ? block_height : v_part; - - /* loop over all block rows */ - for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) { - int x_pos = i * block_width; // horizontal position in frame - int cur_blk_width = (i < h_blocks) ? 
block_width : h_part; - int ret = Z_OK; - uint8_t *ptr = buf + buf_pos; - - /* copy the block to the temp buffer before compression - * (if it differs from the previous frame's block) */ - res = copy_region_enc(p->data[0], s->tmpblock, - s->image_height - (y_pos + cur_blk_height + 1), - x_pos, cur_blk_height, cur_blk_width, - p->linesize[0], previous_frame); - - if (res || *I_frame) { - unsigned long zsize = 3 * block_width * block_height; - ret = compress2(ptr + 2, &zsize, s->tmpblock, - 3 * cur_blk_width * cur_blk_height, 9); - - if (ret != Z_OK) - av_log(s->avctx, AV_LOG_ERROR, - "error while compressing block %dx%d\n", i, j); - - bytestream_put_be16(&ptr, zsize); - buf_pos += zsize + 2; - ff_dlog(s->avctx, "buf_pos = %d\n", buf_pos); - } else { - pred_blocks++; - bytestream_put_be16(&ptr, 0); - buf_pos += 2; - } - } - } - - if (pred_blocks) - *I_frame = 0; - else - *I_frame = 1; - - return buf_pos; -} - - -static int flashsv_encode_frame(AVCodecContext *avctx, AVPacket *pkt, - const AVFrame *pict, int *got_packet) -{ - FlashSVContext * const s = avctx->priv_data; - const uint8_t *prev_frame = s->previous_frame; - int res; - int I_frame = 0; - int opt_w = 4, opt_h = 4; - - /* First frame needs to be a keyframe */ - if (!s->previous_frame) { - prev_frame = pict->data[0]; - I_frame = 1; - } - - /* Check the placement of keyframes */ - if (avctx->gop_size > 0 && - avctx->frame_num >= s->last_key_frame + avctx->gop_size) { - I_frame = 1; - } - - res = ff_alloc_packet(avctx, pkt, s->packet_size); - if (res < 0) - return res; - - pkt->size = encode_bitstream(s, pict, pkt->data, pkt->size, - opt_w * 16, opt_h * 16, - prev_frame, &I_frame); - - //mark the frame type so the muxer can mux it correctly - if (I_frame) { - s->last_key_frame = avctx->frame_num; - ff_dlog(avctx, "Inserting keyframe at frame %"PRId64"\n", avctx->frame_num); - } - - if (I_frame) - pkt->flags |= AV_PKT_FLAG_KEY; - *got_packet = 1; - - //save the current frame - res = av_buffer_replace(&s->prev_frame_buf, pict->buf[0]); - if (res < 0) - return res; - s->previous_frame = pict->data[0]; - - return 0; -} - -const FFCodec ff_flashsv_encoder = { - .p.name = "flashsv", - CODEC_LONG_NAME("Flash Screen Video"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_FLASHSV, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, - .priv_data_size = sizeof(FlashSVContext), - .init = flashsv_encode_init, - FF_CODEC_ENCODE_CB(flashsv_encode_frame), - .close = flashsv_encode_end, - .p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE }, -}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/City Driving School Car Games APK How to Become a Pro Driver.md b/spaces/congsaPfin/Manga-OCR/logs/City Driving School Car Games APK How to Become a Pro Driver.md deleted file mode 100644 index 884582e8e72e0b447d0413f79d358da5fc3f60fe..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/City Driving School Car Games APK How to Become a Pro Driver.md +++ /dev/null @@ -1,134 +0,0 @@ -
-

City Driving School Car Games APK: A Fun and Educational Game for Car Lovers

-

If you love cars and driving, you might be interested in trying out City Driving School Car Games APK, a game that simulates the experience of learning how to drive and park in a realistic urban environment. This game is not only entertaining, but also educational, as it teaches you the basic skills and rules of driving and parking in different scenarios. In this article, we will review the features, benefits, and drawbacks of this game, and answer some frequently asked questions about it.

-

city driving school car games apk


Download Ziphttps://urlca.com/2uOgt6



-

What is City Driving School Car Games APK?

-

City Driving School Car Games APK is an Android game developed by Better Games Studio Pty Ltd. It is a racing game that combines driving school and parking simulator elements. The game has over 10 million downloads on Google Play and a 4.2-star rating from more than 100 thousand users. The game is free to download and play, but it contains ads and in-app purchases.

-

How to Download and Install City Driving School Car Games APK?

-

To download and install City Driving School Car Games APK, you need to follow these steps:

-
    -
  1. Go to the Google Play Store or the official website of the game.
  2. -
  3. Tap on the "Install" or "Download APK" button.
  4. -
  5. Wait for the download to finish.
  6. -
  7. Open the downloaded file and follow the instructions to install the game.
  8. -
  9. Launch the game and enjoy!
  10. -
-

What are the Features of City Driving School Car Games APK?

-

City Driving School Car Games APK has many features that make it an enjoyable and challenging game for car lovers. Some of these features are:

-
    -
  • Multiple cars: You can choose from a variety of luxury, turbo, and sports cars to drive and park in the game. Each car has its own characteristics and performance.
  • -
  • Realistic environment: The game has stunning 3D graphics and sound effects that create a realistic city atmosphere. You can see flyover bridges, freeways, traffic lights, roundabouts, pedestrians, and other vehicles on the road.
  • -
  • Different modes: The game has different modes that test your driving and parking skills in various situations. You can learn the basics of driving and parking in the driving school mode, where an instructor will guide you through different lessons. You can also try the free drive mode, where you can explore the city at your own pace. Or you can challenge yourself in the parking mode, where you have to park your car in different spots within a limited time.
  • -
  • Different levels: The game has over 100 levels that increase in difficulty as you progress. You have to complete each level with a minimum number of mistakes and within a given time limit to earn stars and unlock new cars and levels.
  • -
  • Realistic physics: The game has a realistic physics engine that simulates the behavior of the cars and the environment. You have to control the steering, braking, acceleration, and gear shifting of your car according to the road conditions and traffic rules. You also have to avoid collisions and damage to your car.
  • -
-

What are the Benefits of Playing City Driving School Car Games APK?

-

Playing City Driving School Car Games APK can have several benefits for you, such as:

-
    -
  • Entertainment: The game is fun and addictive, as it offers you a variety of cars, modes, levels, and challenges to keep you entertained for hours.
  • -
  • Educational: The game is educational, as it teaches you the basic skills and rules of driving and parking in a realistic city environment. You can learn how to steer, brake, accelerate, shift gears, avoid obstacles, follow traffic signals, and park your car in different spots.
  • -
  • Skill development: The game promotes skill development, as it improves your hand-eye coordination, reflexes, concentration, problem-solving, decision-making, and time management skills. You have to react quickly and accurately to changing road situations and complete each level with minimum errors and maximum speed.
  • -
-

What are the Drawbacks of Playing City Driving School Car Games APK?

-

Playing City Driving School Car Games APK can also have some drawbacks for you, such as:

-
    -
  • Ads and in-app purchases: The game is free to download and play, but it contains ads and in-app purchases that can be annoying and costly. You have to watch ads to earn coins or unlock some features, or you have to pay real money to buy coins or remove ads.
  • -
  • Battery and storage consumption: The game consumes a lot of battery and storage, as its high-quality graphics and sound effects require a lot of power and space. You have to make sure that your device has enough battery and storage capacity to run the game smoothly.
  • -
  • Potential addiction: The game is potentially addictive, as it can make you spend a lot of time and money on it. You have to be careful not to neglect your other responsibilities and activities because of the game.
  • -
-

How to Play City Driving School Car Games APK?

-

To play City Driving School Car Games APK, you need to follow these steps:

-
    -
  1. Select a car from the garage. You can unlock more cars by earning stars or buying coins.
  2. -
  3. Select a mode from the menu. You can choose from driving school, free drive, or parking mode.
  4. -
  5. Select a level from the map. You can unlock more levels by earning stars or buying coins.
  6. -
  7. Follow the instructions on the screen. You have to control the steering, braking, acceleration, and gear shifting of your car using the buttons on the screen. You also have to follow the traffic rules and avoid collisions and damage to your car.
  8. -
  9. Complete the level with minimum mistakes and maximum speed. You have to reach the destination or park your car in the designated spot within the time limit and without exceeding the error limit.
  10. -
  11. Earn stars and coins based on your performance. You can use them to unlock new cars and levels.
  12. -
-

A Comparison Table of City Driving School Car Games APK and Other Similar Games

-

There are many other games that are similar to City Driving School Car Games APK, such as Real Car Parking 2, Driving Academy, and Car Parking Multiplayer. Here is a comparison table of some of their features:

- - - - - - - -< - - - - -
| Feature | City Driving School Car Games APK | Real Car Parking 2 | Driving Academy | Car Parking Multiplayer |
| --- | --- | --- | --- | --- |
| Cars | Over 20 luxury, turbo, and sports cars | Over 80 realistic cars with interior view | Over 135 cars with different models and colors | Over 70 cars with customization options |
| Environment | A realistic city with flyover bridges, freeways, traffic lights, roundabouts, pedestrians, and other vehicles | A realistic city with parking lots, gas stations, car washes, highways, and traffic jams | A realistic city with roads, signs, signals, pedestrians, animals, traffic rules, and weather conditions | A realistic open world with different maps, such as city, airport, desert, port, etc. |
| Modes | Driving school, free drive, and parking mode | Career mode, free mode, online mode, and parking mode | Career mode, free drive mode, night mode, challenge mode, road signs quiz mode | Single player mode and multiplayer mode with voice chat |
| Levels | Over 100 levels with increasing difficulty | Over 150 levels with different parking scenarios | Over 250 levels with different driving situations | No levels but unlimited exploration and interaction with other players |
| Physics | A realistic physics engine that simulates the behavior of the cars and the environment | A realistic physics engine that simulates the behavior of the cars and the environment | A realistic physics engine that simulates the behavior of the cars and the environment | A realistic physics engine that simulates the behavior of the cars and the environment |
| Ratings | A 4.2-star rating from more than 100 thousand users on Google Play | A 4.3-star rating from more than 1 million users on Google Play | A 4.1-star rating from more than 100 thousand users on Google Play | A 4.4-star rating from more than 1 million users on Google Play |
-

Conclusion

-

City Driving School Car Games APK is a fun and educational game for car lovers who want to learn how to drive and park in a realistic city environment. The game has many features, benefits, and drawbacks that make it an enjoyable and challenging game. You can download and install the game for free from the Google Play Store or the official website of the game. You can also compare the game with other similar games and see which one suits your preferences better.

-


-

Frequently Asked Questions

-

Q: Is City Driving School Car Games APK safe to download and play?

-

A: Yes, City Driving School Car Games APK is safe to download and play, as long as you download it from a trusted source, such as the Google Play Store or the official website of the game. The game does not contain any viruses, malware, or spyware that can harm your device or data.

-

Q: How can I remove ads and buy coins in City Driving School Car Games APK?

-

A: You can remove ads and buy coins in City Driving School Car Games APK by making in-app purchases with real money. You can also watch ads to earn coins or unlock some features for free.

-

Q: How can I save my progress and settings in City Driving School Car Games APK?

-

A: You can save your progress and settings in City Driving School Car Games APK by signing in with your Google account. This way, you can sync your data across different devices and resume your game from where you left off.

-

Q: How can I contact the developer of City Driving School Car Games APK?

-

A: You can contact the developer of City Driving School Car Games APK by sending an email to bettergamesstudio@gmail.com or by visiting their Facebook page. You can also leave a review or a comment on the Google Play Store or the official website of the game.

-

Q: What are some tips and tricks for playing City Driving School Car Games APK?

-

A: Some tips and tricks for playing City Driving School Car Games APK are:

-
    -
  • Choose a car that suits your driving style and preference. You can test different cars in the free drive mode before buying them.
  • -
  • Follow the instructions and feedback of the instructor in the driving school mode. They will help you learn the basics of driving and parking.
  • -
  • Use the camera angles and mirrors to get a better view of the road and your car. You can switch between different camera modes by tapping on the camera icon on the screen.
  • -
  • Be careful not to damage your car or hit other vehicles or objects. This will reduce your score and increase your error limit.
  • -
  • Practice and replay each level until you master it. You can improve your skills and earn more stars and coins by doing so.
  • -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Gamemoding with Friends How to Collaborate and Play with Other Modders.md b/spaces/congsaPfin/Manga-OCR/logs/Gamemoding with Friends How to Collaborate and Play with Other Modders.md deleted file mode 100644 index 5e9d061bcc38ba3c8a70d3fb674cd5c0a126b59e..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Gamemoding with Friends How to Collaborate and Play with Other Modders.md +++ /dev/null @@ -1,147 +0,0 @@ - - - -
-

Gamemoding: What Is It And Why You Should Try It

-

Have you ever played a video game and wished you could change something about it? Maybe you wanted to improve the graphics, add new features, or create a whole new story. If so, you might be interested in gamemoding.

-

gamemoding


Download File >>> https://urlca.com/2uOaVE



-

Gamemoding is short for video game modding (modification), which is the process of altering or creating new content for a video game by players or fans. Gamemoding can range from small changes and tweaks to complete overhauls that transform the original game into something new. Gamemoding can extend the replay value and interest of a game by adding variety, challenge, customization, and innovation.

-

Gamemoding is not only fun but also rewarding. It can help you learn new skills, express your creativity, connect with other gamers, and even advance your career in game development. In this article, we will explain how gamemoding works, how to start gamemoding, and what the benefits and challenges of gamemoding are, and we will provide some examples of popular games and mods along the way. Let's get started!

-

How Gamemoding Works

-

Gamemoding works by modifying the files and data of a video game, either by editing the existing ones or adding new ones. Depending on the game and the mod, this can involve different tools and techniques, such as:

-
    -
  • Using a game editor or an engine that allows you to create and modify levels, maps, characters, objects, etc. For example, the Unreal Engine or the Creation Kit.
  • -
  • Using a scripting language or a programming language that allows you to write code that controls the logic, behavior, and interaction of the game elements. For example, Lua or C++.
  • -
  • Using a graphics software or a modeling software that allows you to create and edit textures, models, animations, effects, etc. For example, Photoshop or Blender.
  • -
  • Using a sound software or a music software that allows you to create and edit sounds, music, voices, etc. For example, Audacity or FL Studio.
  • -
-

Some games are more mod-friendly than others, meaning they provide more access and support for modding. Some games have official modding tools and platforms that are released by the developers or publishers of the game. For example, Steam Workshop or Bethesda.net. Some games have unofficial modding tools and platforms that are created by the modding community. For example, Nexus Mods or Mod DB.
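To make the idea of "modifying the files and data of a video game" concrete, here is a minimal, purely illustrative sketch of the crudest form of modding: patching a single value inside one of the game's data files. The file name game_data.bin, the offset, and the new value are invented for this example; a real mod would target a documented location in a real asset file, and you would always keep a backup of the original.

```c
/* Hypothetical sketch of a one-byte binary patch. Everything here
 * (file name, offset, value) is made up for illustration only. */
#include <stdio.h>

int main(void)
{
    const char *path = "game_data.bin";  /* hypothetical game asset file */
    long offset = 0x200;                 /* hypothetical location of a stat */
    unsigned char new_value = 200;       /* e.g. raise a weapon's damage */

    FILE *f = fopen(path, "r+b");        /* open for in-place binary update */
    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fseek(f, offset, SEEK_SET) != 0) {
        perror("fseek");
        fclose(f);
        return 1;
    }
    fwrite(&new_value, 1, 1, f);         /* overwrite the original byte */
    fclose(f);
    printf("Patched byte at offset %ld in %s\n", offset, path);
    return 0;
}
```

Most practical mods work at a much higher level than this (through editors, scripting hooks, or asset replacement), but nearly all of them ultimately come down to changing the game's files and data in some controlled way.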

-

Types Of Gamemoding

-

There are many types of gamemoding, depending on the scope and purpose of the mod. Here are some common types of gamemoding:

-
    -
  • Graphics mods: These mods improve or change the visual appearance of the game, such as the lighting, colors, textures, models, etc. For example, ENB Series or HD Texture Packs.
  • -
  • Gameplay mods: These mods improve or change the gameplay mechanics of the game, such as the combat, movement, difficulty, balance, etc. For example, SkyUI or Project Nevada.
  • -
  • Content mods: These mods add new content to the game, such as new levels, quests, items, characters, etc. For example, The Forgotten City or The Stanley Parable.
  • -
  • Total conversions: These mods transform the game into a completely different game genre or setting, using the same engine and assets.

How To Start Gamemoding

To start gamemoding, you first need to choose a game and a modding platform that suit you, and then learn the basics of the modding tools and languages involved. Depending on the game and the mod you want to make, these can include the following (a small illustrative sketch follows this list):

  • Game editors or engines: These are software that allow you to create and modify the game world, such as the levels, maps, characters, objects, etc. For example, Unreal Engine or Creation Kit.
  • -
  • Scripting languages or programming languages: These are languages that allow you to write code that controls the logic, behavior, and interaction of the game elements. For example, Lua or C++.
  • -
  • Graphics software or modeling software: These are software that allow you to create and edit the visual aspects of the game, such as the textures, models, animations, effects, etc. For example, Photoshop or Blender.
  • -
  • Sound software or music software: These are software that allow you to create and edit the audio aspects of the game, such as the sounds, music, voices, etc. For example, Audacity or FL Studio.
  • -
-
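As promised above, here is a small sketch of the kind of logic a scripting-style gameplay mod might express. It is written in C purely for illustration: the GameSettings structure, the file name mod_settings.txt, and the parameter names are all hypothetical, since every game exposes its own modding interface (often through an embedded language such as Lua rather than C).

```c
/* Hypothetical sketch: a gameplay mod that overrides default game
 * parameters from a plain-text file. All names are invented. */
#include <stdio.h>
#include <string.h>

typedef struct GameSettings {
    float player_speed;   /* base movement speed  */
    float enemy_damage;   /* damage dealt by foes */
    int   max_health;     /* player health cap    */
} GameSettings;

/* Read "key value" pairs from the mod file and overwrite the defaults. */
static void apply_mod(GameSettings *s, const char *path)
{
    char key[64];
    float value;
    FILE *f = fopen(path, "r");
    if (!f)
        return; /* no mod installed: keep the vanilla settings */

    while (fscanf(f, "%63s %f", key, &value) == 2) {
        if (!strcmp(key, "player_speed"))      s->player_speed = value;
        else if (!strcmp(key, "enemy_damage")) s->enemy_damage = value;
        else if (!strcmp(key, "max_health"))   s->max_health = (int)value;
    }
    fclose(f);
}

int main(void)
{
    GameSettings settings = { 1.0f, 10.0f, 100 }; /* vanilla defaults */
    apply_mod(&settings, "mod_settings.txt");
    printf("speed=%.2f damage=%.2f health=%d\n",
           settings.player_speed, settings.enemy_damage, settings.max_health);
    return 0;
}
```

Usage under these assumptions is simple: drop a text file named mod_settings.txt next to the executable with lines such as "player_speed 1.5", and the overrides are applied at startup, while the vanilla defaults are kept whenever no mod file is present.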

Join A Modding Community

-

The final step is to join a modding community that can help you with your gamemoding journey. You can find modding communities on various platforms, such as forums, websites, social media, etc. You can also join specific modding communities for the game or the mod you are interested in. By joining a modding community, you can:

-
    -
  • Get feedback: You can get feedback on your mod from other modders and players who can offer suggestions, critiques, and compliments.
  • -
  • Get support: You can get support on your mod from other modders and experts who can answer your questions, solve your problems, and share their knowledge.
  • -
  • Get inspiration: You can get inspiration on your mod from other mods and projects that can spark your creativity and motivate you.
  • -
  • Get collaboration: You can get collaboration on your mod from other modders and teams who can work with you on your mod and contribute their skills and ideas.
  • -
-

Benefits Of Gamemoding

-

Gamemoding can benefit you in many ways, both personally and professionally. Here are some of the benefits of gamemoding:

-

Personal Benefits

-

Gamemoding can provide you with personal benefits such as:

-

-
    -
  • Fun: Gamemoding can be a fun and enjoyable hobby that allows you to play games in new and exciting ways.
  • -
  • Learning: Gamemoding can be a learning opportunity that allows you to acquire new skills and knowledge in various fields such as programming, design, art, etc.
  • -
  • Creativity: Gamemoding can be a creative outlet that allows you to express your imagination and originality in making games.
  • -
  • Expression: Gamemoding can be a form of expression that allows you to communicate your message and vision in making games.
  • -
-

Professional Benefits

-

Gamemoding can also provide you with professional benefits such as:

-
    -
  • Portfolio: Gamemoding can be a portfolio builder that allows you to showcase your work and achievements in making games.
  • -
  • Skills: Gamemoding can be a skills enhancer that allows you to improve your abilities and competencies in making games.
  • -
  • Network: Gamemoding can be a network expander that allows you to connect with other people and organizations in the game industry.
  • -
  • Opportunities: Gamemoding can be an opportunity creator that allows you to access new possibilities and prospects in the game industry.
  • -
-

Challenges And Risks Of Gamemoding

-

Gamemoding is not without its challenges and risks. There are some potential difficulties and dangers that you should be aware of when gamemoding. Here are some of them:

-

Technical Challenges And Risks

-

Gamemoding can pose some technical challenges and risks such as:

-
    -
  • Compatibility: Your mod may not be compatible with the game version, the game platform, or other mods that you have installed. This can cause errors, crashes, or conflicts in the game.
  • -
  • Performance: Your mod may affect the performance of the game, such as the loading time, the frame rate, or the memory usage. This can cause lag, stutter, or freeze in the game.
  • -
  • Bugs: Your mod may have bugs or glitches that can affect the functionality or quality of the game. This can cause unexpected results, errors, or crashes in the game.
  • -
  • Security: Your mod may contain malicious code or files that can harm your computer or your data. This can cause viruses, malware, or spyware in your system.
  • -
-

Legal Challenges And Risks

-

Gamemoding can also pose some legal challenges and risks such as:

-
    -
  • Intellectual property rights: Your mod may infringe on the intellectual property rights of the game developer, publisher, or owner. This can include trademarks, copyrights, patents, etc. This can cause legal actions, cease and desist orders, or lawsuits against you.
  • -
  • Licensing agreements: Your mod may violate the licensing agreements of the game or the modding platform. This can include terms of service, end user license agreements, etc. This can cause bans, suspensions, or removals of your mod.
  • -
  • Permissions: Your mod may require permissions from the game developer, publisher, owner, or other modders. This can include credits, acknowledgments, approvals, etc. This can cause disputes, conflicts, or complaints from them.
  • -
-

Ethical Challenges And Risks

-

Gamemoding can also pose some ethical challenges and risks such as:

-
    -
  • Plagiarism: Your mod may copy or steal content from other games or mods without giving proper credit or permission. This can cause accusations, criticisms, or backlash from them.
  • -
  • Quality control: Your mod may have low quality or poor standards that can affect the reputation or experience of the game or the modding community. This can cause negative reviews, ratings, or feedback from them.
  • -
  • Content moderation: Your mod may have inappropriate or offensive content that can violate the rules or policies of the game or the modding platform. This can include violence, nudity, racism, etc. This can cause reports, warnings, or sanctions from them.
  • -
-

Conclusion

-

Gamemoding is a fascinating and rewarding activity that allows you to modify or create new content for video games. Gamemoding can enhance your gaming experience, improve your skills, express your creativity, and connect with other gamers. However, gamemoding also comes with some challenges and risks that you should be aware of and prepared for. If you are interested in gamemoding, you should choose a game and a modding platform that suit you, learn the basics of modding tools and languages, and join a modding community that can help you. Gamemoding is a great way to enjoy games in new and exciting ways. Why not give it a try?

-

FAQs

-

Here are some frequently asked questions related to gamemoding:

-
    -
  1. What is the difference between gamemoding and game development?
  2. -

    Gamemoding is a form of game development, but it is not the same as game development. Gamemoding is the process of altering or creating new content for an existing game by players or fans. Game development is the process of creating a new game from scratch by professionals or amateurs.

    -
  3. Is gamemoding legal?
  4. -

    Gamemoding is legal as long as you respect the intellectual property rights, licensing agreements, and permissions of the game developer, publisher, owner, and other modders. You should also follow the rules and policies of the game and the modding platform. You should not use gamemoding for commercial purposes or malicious intents.

    -
  5. Is gamemoding safe?
  6. -

    Gamemoding is safe as long as you use reliable and reputable modding tools and platforms, and scan your mods for viruses or malware. You should also backup your game files and data before installing or uninstalling mods, and avoid using incompatible or conflicting mods.

    -
  7. How can I find mods for my favorite games?
  8. -

    You can find mods for your favorite games on various modding platforms, such as Steam Workshop, Bethesda.net, Nexus Mods, Mod DB, etc. You can also search for mods on Google or YouTube, or ask for recommendations on forums or social media.

    -
  9. How can I share my mods with other gamers?
  10. -

    You can share your mods with other gamers by uploading them to a modding platform, such as Steam Workshop, Bethesda.net, Nexus Mods, Mod DB, etc. You can also share your mods on websites, blogs, social media, etc. You should provide a clear and detailed description of your mod, including its features, requirements, installation instructions, screenshots, videos, etc.

    -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Gangster City Mafia Crime Download the Mod APK with Unlimited Money and More.md b/spaces/congsaPfin/Manga-OCR/logs/Gangster City Mafia Crime Download the Mod APK with Unlimited Money and More.md deleted file mode 100644 index 3b56e762b4b2107d1f1418fc1672557fa406b141..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Gangster City Mafia Crime Download the Mod APK with Unlimited Money and More.md +++ /dev/null @@ -1,121 +0,0 @@ - - - -Gangster City Mafia Crime Mod APK: A Review - - -

Gangster City Mafia Crime Mod APK: A Review

-

Do you love playing action-packed games that let you explore a vast open world, fight against enemies, and become a crime lord? If yes, then you might want to check out Gangster City Mafia Crime, a thrilling game that lets you experience the life of a gangster in a realistic city. And if you want to make your gameplay even more exciting, you might want to try Gangster City Mafia Crime Mod APK, a modified version that gives you unlimited money, gems, and other benefits. In this article, we will review Gangster City Mafia Crime and its modded version, and show you how to download and install it on your device.

-

gangster city mafia crime mod apk


Download >>>>> https://urlca.com/2uOf9k



-

What is Gangster City Mafia Crime?

-

Gangster City Mafia Crime is an action-adventure game developed by Game Pickle Studio. It is available for Android devices on Google Play Store. The game has over 10 million downloads and a rating of 4.0 out of 5 stars.

-

The gameplay of Gangster City Mafia Crime

-

In Gangster City Mafia Crime, you play as a gangster who wants to rule the city by any means necessary. You can explore a huge 3D city full of opportunities and challenges. You can drive cars, bikes, helicopters, tanks, and other vehicles. You can shoot guns, rockets, grenades, and other weapons. You can fight against rival gangs, cops, army, and other enemies. You can complete missions, earn money, buy properties, upgrade your skills, and customize your character.

-

The features of Gangster City Mafia Crime

-

Gangster City Mafia Crime has many features that make it an enjoyable game for action lovers. Some of these features are:

-

Stunning graphics and sound effects

-

The game has realistic graphics that create an immersive atmosphere. The city is detailed and lively, with different weather effects, day-night cycle, traffic, pedestrians, buildings, landmarks, etc. The sound effects are also impressive, with realistic gunshots, explosions, car sounds, etc.

-

gangster city mafia crime game mod apk
-download gangster city mafia crime mod apk
-gangster city mafia crime hack mod apk
-gangster city mafia crime simulator mod apk
-gangster city mafia crime android mod apk
-gangster city mafia crime unlimited money mod apk
-gangster city mafia crime 3d mod apk
-gangster city mafia crime offline mod apk
-gangster city mafia crime latest version mod apk
-gangster city mafia crime free mod apk
-gangster city mafia crime cheats mod apk
-gangster city mafia crime full mod apk
-gangster city mafia crime online mod apk
-gangster city mafia crime hd mod apk
-gangster city mafia crime pro mod apk
-gangster city mafia crime premium mod apk
-gangster city mafia crime vip mod apk
-gangster city mafia crime mega mod apk
-gangster city mafia crime new mod apk
-gangster city mafia crime updated mod apk
-gangster city mafia crime best mod apk
-gangster city mafia crime real mod apk
-gangster city mafia crime fun mod apk
-gangster city mafia crime super mod apk
-gangster city mafia crime extreme mod apk
-gangster city mafia crime action mod apk
-gangster city mafia crime adventure mod apk
-gangster city mafia crime shooting mod apk
-gangster city mafia crime racing mod apk
-gangster city mafia crime driving mod apk
-gangster city mafia crime fighting mod apk
-gangster city mafia crime stealth mod apk
-gangster city mafia crime survival mod apk
-gangster city mafia crime strategy mod apk
-gangster city mafia crime role playing mod apk
-gangster city mafia crime sandbox mod apk
-gangster city mafia crime open world mod apk
-gangster city mafia crime grand theft auto mod apk
-gangster city mafia crime vice town mod apk
-gangster city mafia crime vegas mod apk
-gangster city mafia crime san andreas mod apk
-gangster city mafia crime new york mod apk
-gangster city mafia crime chicago mod apk
-gangster city mafia crime los angeles mod apk
-gangster city mafia crime miami mod apk
-gangster city mafia crime london mod apk
-gangster city mafia crime tokyo mod apk
-gangster city mafia crime rio de janeiro mod apk
-gangster city mafia crime moscow mod apk

-

Various weapons and vehicles to choose from

-

The game offers a wide range of weapons and vehicles for you to use in your missions. You can choose from pistols, rifles, shotguns, sniper rifles, machine guns, rocket launchers, and more, and you can get around in cars, bikes, helicopters, tanks, and other vehicles.

-

Realistic city environment and missions

-

The game has a realistic city environment that simulates the life of a gangster. You can interact with different people, places, and events in the city. You can rob banks, stores, cars, etc. You can escape from the police, chase enemies, or cause chaos. You can also accept missions from different characters, such as your boss, your girlfriend, or your friends. The missions are varied and challenging, including assassinations, kidnappings, heists, and races.

-

Online multiplayer mode and leaderboards

-

The game also has an online multiplayer mode that lets you play with other players from around the world. You can join or create a gang with your friends or other players. You can compete with other gangs in different modes, such as deathmatch, team deathmatch, capture the flag, etc. You can also chat with other players and make new friends. The game also has leaderboards that show your rank and achievements among other players.

-

What is Gangster City Mafia Crime Mod APK?

-

Gangster City Mafia Crime Mod APK is a modified version of the original game that gives you some extra benefits that are not available in the original game. These benefits include unlimited money and gems, ad-free and bug-free experience, and easy to download and install.

-

The benefits of Gangster City Mafia Crime Mod APK

-

Gangster City Mafia Crime Mod APK has many benefits that make it a better choice than the original game. Some of these benefits are:

-

Unlimited money and gems

-

With Gangster City Mafia Crime Mod APK, you don't have to worry about running out of money or gems in the game. You can get unlimited money and gems for free. You can use them to buy anything you want in the game, such as weapons, vehicles, properties, clothes, etc. You can also upgrade your skills and abilities to the maximum level. This way, you can enjoy the game without any limitations or restrictions.

-

Ad-free and bug-free experience

-

Another benefit of Gangster City Mafia Crime Mod APK is that it removes all the annoying ads and bugs from the game. You don't have to watch ads to get rewards or unlock features in the game. You also don't have to face any glitches or errors that might ruin your gameplay. You can play the game smoothly and without any interruptions.

-

Easy to download and install

-

The last benefit of Gangster City Mafia Crime Mod APK is that it is easy to download and install on your device. You don't need to root your device or use any complicated methods to get the modded version of the game. You just need to follow some simple steps that we will show you later in this article.

-

How to download and install Gangster City Mafia Crime Mod APK?

-

If you are interested in downloading and installing Gangster City Mafia Crime Mod APK on your device, you need to follow these steps:

-

The steps to download and install Gangster City Mafia Crime Mod APK

-
    -
  1. First of all, you need to uninstall the original game from your device if you have it installed.
  2. -
  3. Then, you need to click on this link to download the Gangster City Mafia Crime Mod APK file on your device.
  4. -
  5. After downloading the file, you need to go to your device settings and enable the option of "Unknown Sources" under the security section. This will allow you to install apps from sources other than Google Play Store.
  6. -
  7. Next, you need to locate the downloaded file on your device and tap on it to start the installation process.
  8. -
  9. Finally, you need to wait for a few seconds until the installation is completed and then open the game from your app drawer.
  10. -
-

The precautions to take before downloading and installing Gangster City Mafia Crime Mod APK

-

Before downloading and installing Gangster City Mafia Crime Mod APK on your device, you need to take some precautions to avoid any problems or risks. These precautions are:

-
    -
  • You need to make sure that your device has enough storage space to accommodate the modded version of the game.
  • -
  • You need to make sure that your device has a good internet connection to download the modded version of the game.
  • -
  • You need to make sure that you download the modded version of the game from a trusted and reliable source, such as the link we provided above, and that the file matches any checksum the source publishes (see the sketch after this list).
  • -
  • You need to make sure that you back up your data and progress from the original game before uninstalling it.
  • -
  • You need to make sure that you follow the installation steps carefully and correctly.
  • -
-
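
One way to act on the "trusted and reliable source" precaution is to compare the downloaded file with a checksum (for example, a SHA-256 hash) if the download page publishes one. The snippet below is only a minimal Python sketch of that idea and is not part of the original guide; the file name and expected hash are placeholders you would replace with real values.

```python
# Minimal sketch: check a downloaded APK against a published SHA-256 checksum.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

downloaded_apk = "gangster-city-mod.apk"           # placeholder file name
expected_sha256 = "paste-the-published-hash-here"  # placeholder value

actual = sha256_of(downloaded_apk)
print("computed:", actual)
if actual.lower() != expected_sha256.lower():
    print("Checksum mismatch - do not install this file.")
```

If the two hashes do not match, the file was corrupted or tampered with on the way to you and should not be installed.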

Conclusion

-

Gangster City Mafia Crime is a fun and exciting game that lets you become a gangster in a realistic city. You can enjoy various features, such as stunning graphics, various weapons and vehicles, realistic city environment and missions, and online multiplayer mode. However, if you want to enhance your gameplay and get unlimited money, gems, and other benefits, you can try Gangster City Mafia Crime Mod APK, a modified version of the game that gives you these advantages. You can download and install Gangster City Mafia Crime Mod APK on your device by following the steps and precautions we showed you in this article. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave them in the comments section below.

-

FAQs

-

Here are some frequently asked questions about Gangster City Mafia Crime and its modded version:

-
    -
  1. Q: Is Gangster City Mafia Crime Mod APK safe to use?
  2. -
  3. A: Yes, Gangster City Mafia Crime Mod APK is safe to use as long as you download it from a trusted and reliable source, such as the link we provided above. However, you should always be careful when downloading and installing any modded version of any game or app, as they might contain viruses or malware that could harm your device or data.
  4. -
  5. Q: Is Gangster City Mafia Crime Mod APK compatible with my device?
  6. -
  7. A: Gangster City Mafia Crime Mod APK is compatible with most Android devices that have Android 4.1 or higher. However, some devices might not support the modded version of the game due to different specifications or settings. You can check the compatibility of your device by trying to install the modded version of the game on your device.
  8. -
  9. Q: How can I update Gangster City Mafia Crime Mod APK?
  10. -
  11. A: Gangster City Mafia Crime Mod APK is updated regularly by its developers to fix any bugs or errors and add new features or improvements. You can check for updates by visiting the link we provided above or by following the official social media accounts of the game. However, you should always back up your data and progress before updating the modded version of the game, as they might get erased or corrupted during the update process.
  12. -
  13. Q: How can I uninstall Gangster City Mafia Crime Mod APK?
  14. -
  15. A: You can uninstall Gangster City Mafia Crime Mod APK by following the same steps as uninstalling any other app on your device. You can go to your device settings, find the app in the list of installed apps, and tap on it to open its details. Then, you can tap on the "Uninstall" button to remove the app from your device.
  16. -
  17. Q: Can I play Gangster City Mafia Crime Mod APK offline?
  18. -
  19. A: Yes, you can play Gangster City Mafia Crime Mod APK offline without any internet connection. However, some features of the game, such as online multiplayer mode and leaderboards, require an internet connection to function properly.
  20. -
- -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Get Real Gangster Crime MOD APK for Free No Root Required.md b/spaces/congsaPfin/Manga-OCR/logs/Get Real Gangster Crime MOD APK for Free No Root Required.md deleted file mode 100644 index 4840bd1615a6cdfd850eff6223188aaaefcfbaeb..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Get Real Gangster Crime MOD APK for Free No Root Required.md +++ /dev/null @@ -1,112 +0,0 @@ -
-

Real Gangster Crime Hack Mod APK 2021: How to Download and Install It

-

If you are a fan of open-world action games, you might have heard of Real Gangster Crime, a popular game that lets you explore a fictional city called New Vegas and become a notorious gangster. But did you know that you can make the game even more fun and exciting by using a hack mod apk? In this article, we will tell you everything you need to know about Real Gangster Crime hack mod apk 2021, including what it is, why you would want to use it, how to download and install it, what features it offers, and the pros and cons of using it. Read on to find out more!

-

Introduction

-

What is Real Gangster Crime?

-

Real Gangster Crime is an open-world action game developed by Naxeex Studio. It is available for free on Android devices. The game lets you play as a gangster who can roam around the city of New Vegas, which is inspired by Las Vegas. You can steal cars, fight with other gangs, shoot at cops, complete missions, and customize your character and vehicles. The game has realistic graphics, physics, and sound effects, as well as a variety of weapons and vehicles to choose from.

-

real gangster crime hack mod apk 2021


Download Zip 🌟 https://urlca.com/2uOdEN



-

What is a hack mod apk?

-

A hack mod apk is a modified version of an original app or game that has been hacked or altered to provide some extra features or benefits that are not available in the official version. For example, a hack mod apk may give you unlimited money, unlock all items, remove ads, or bypass restrictions. A hack mod apk usually comes in the form of an apk file, which is an Android application package file that contains all the components of an app or game.

-

Why would you want to use a hack mod apk for Real Gangster Crime?

-

There are many reasons why you might want to use a hack mod apk for Real Gangster Crime. Some of them are:

-
    -
  • You want to have more fun and excitement in the game by accessing features that are normally locked or limited.
  • -
  • You want to save time and effort by skipping the grind or waiting for resources.
  • -
  • You want to customize your character and vehicles to your liking without spending real money or watching ads.
  • -
  • You want to explore all the missions and locations in the game without any restrictions.
  • -
-

However, before you decide to use a hack mod apk for Real Gangster Crime, you should also be aware of the risks and drawbacks that come with it. We will discuss them later in this article.

-

How to download and install Real Gangster Crime hack mod apk 2021

-

If you are interested in using a hack mod apk for Real Gangster Crime, you will need to follow these steps:

-

Step 1: Find a reliable source for the hack mod apk file

-

The first thing you need to do is to find a trustworthy and safe website that provides the hack mod apk file for Real Gangster Crime. There are many websites that claim to offer such files, but not all of them are reliable or secure. Some of them may contain malware or viruses that can harm your device or steal your personal information. Some of them may also provide fake or outdated files that do not work or cause problems in the game. Therefore, you should do some research and check the reviews and ratings of the website before downloading anything from it. You can also use a reputable antivirus or anti-malware software to scan the file before installing it.

-

Step 2: Enable unknown sources on your device

-

The next thing you need to do is to enable unknown sources on your device. This is because the hack mod apk file is not from the official Google Play Store, and your device may block the installation of such files by default. To enable unknown sources, you need to go to your device settings, then security, then toggle on the option that allows installation of apps from unknown sources. You may also need to grant some permissions to the file manager app that you use to access the hack mod apk file.

-

Step 3: Download and install the hack mod apk file

-

The third thing you need to do is to download and install the hack mod apk file on your device. To do this, you need to go to the website that you have chosen in step 1, and click on the download button or link for the hack mod apk file. You may need to wait for a few seconds or minutes for the download to complete, depending on your internet speed and the size of the file. Once the download is done, you need to locate the file in your device storage, and tap on it to start the installation process. You may need to follow some instructions or agree to some terms and conditions during the installation.

-

Step 4: Launch the game and enjoy the features

-

The last thing you need to do is to launch the game and enjoy the features of the hack mod apk. To do this, you need to find the game icon on your device home screen or app drawer, and tap on it to open it. You may need to wait for a few seconds or minutes for the game to load, depending on your device performance and the game size. Once the game is loaded, you should be able to see and use all the features that the hack mod apk offers, such as unlimited money, weapons, vehicles, missions, locations, and no ads.

-

real gangster crime mod apk unlimited money
-real gangster crime hack apk download for android
-real gangster crime mod apk latest version 2021
-real gangster crime hack mod apk free download
-real gangster crime mod apk revdl
-real gangster crime hack apk no root
-real gangster crime mod apk android 1
-real gangster crime hack mod apk online
-real gangster crime mod apk unlimited gems
-real gangster crime hack apk ios
-real gangster crime mod apk rexdl
-real gangster crime hack mod apk offline
-real gangster crime mod apk unlimited coins
-real gangster crime hack apk an1
-real gangster crime mod apk happymod
-real gangster crime hack mod apk 5.9.4
-real gangster crime mod apk unlimited diamonds
-real gangster crime hack apk obb
-real gangster crime mod apk pure
-real gangster crime hack mod apk 2021 download
-real gangster crime mod apk unlimited everything
-real gangster crime hack apk latest version
-real gangster crime mod apk vip unlocked
-real gangster crime hack mod apk new vegas
-real gangster crime mod apk all unlocked
-real gangster crime hack apk unlimited health
-real gangster crime mod apk god mode
-real gangster crime hack mod apk android republic
-real gangster crime mod apk mega mod
-real gangster crime hack apk unlimited ammo
-real gangster crime mod apk no ads
-real gangster crime hack mod apk unlimited cars
-real gangster crime mod apk high damage
-real gangster crime hack apk all weapons unlocked
-real gangster crime mod apk no root required
-real gangster crime hack mod apk unlimited cash
-real gangster crime mod apk anti ban
-real gangster crime hack apk 100 working
-real gangster crime mod apk full version
-real gangster crime hack mod apk cheat menu

-

Features of Real Gangster Crime hack mod apk 2021

-

Now that you know how to download and install Real Gangster Crime hack mod apk 2021, you might be wondering what features it offers. Here are some of them:

-

Unlimited money

-

One of the most attractive features of Real Gangster Crime hack mod apk 2021 is that it gives you unlimited money in the game. Money is used in the game to buy weapons, vehicles, clothes, accessories, and other items that can enhance your gameplay experience. With unlimited money, you can buy anything you want without worrying about running out of cash or earning it by completing missions or watching ads.

-

Unlimited weapons and vehicles

-

Another feature of Real Gangster Crime hack mod apk 2021 is that it gives you unlimited access to all the weapons and vehicles in the game. Weapons are used in the game to fight with other gangs, cops, or enemies. Vehicles are used in the game to travel around the city, escape from chases, or perform stunts. With unlimited weapons and vehicles, you can choose any weapon or vehicle you like without having to unlock them or pay for them. You can also switch between them anytime you want without any hassle.

-

No ads

-

A third feature of Real Gangster Crime hack mod apk 2021 is that it removes all the ads from the game. Ads are annoying and distracting, and they can interrupt your gameplay or make you watch them to get some rewards or bonuses. With no ads, you can enjoy the game without any interruptions or distractions. You can also save your data and battery by not loading or watching any ads.

-

Unlock all missions and locations

-

A fourth feature of Real Gangster Crime hack mod apk 2021 is that it unlocks all the missions and locations in the game. Missions are tasks or objectives that you can complete in the game to earn money, reputation, or items. Locations are places that you can visit or explore in the game, such as casinos, hotels, clubs, or airports. With all missions and locations unlocked, you can play any mission or visit any location you want without having to complete certain requirements or reach certain levels.

-

Pros and cons of using Real Gangster Crime hack mod apk 2021

-

As you can see, Real Gangster Crime hack mod apk 2021 offers many features that can make the game more fun and exciting. However, it also has some drawbacks that you should be aware of before using it. Here are some of the pros and cons of using Real Gangster Crime hack mod apk 2021:

-

Pros

-

More fun and excitement

-

The main advantage of using Real Gangster Crime hack mod apk 2021 is that it makes the game more fun and exciting by giving you access to features that are normally locked or limited. You can have more options and possibilities in the game, and you can experiment with different weapons, vehicles, items, missions, and locations. You can also have more challenges and thrill by fighting with more enemies or performing more stunts.

-

More customization and freedom

-

Another advantage of using Real Gangster Crime hack mod apk 2021 is that it gives you more customization and freedom in the game by allowing you to buy and use anything you want without spending real money or watching ads. You can customize your character and vehicles to your liking, and you can change them anytime you want. You can also play the game at your own pace and style, without having to follow any rules or restrictions.

-

No need to spend real money or watch ads

-

A third advantage of using Real Gangster Crime hack mod apk 2021 is that it saves you from spending real money or watching ads in the game. You don't have to spend any money to buy weapons, vehicles, clothes, accessories, or other items in the game. You also don't have to watch any ads to get some rewards or bonuses in the game. You can enjoy the game for free and without any interruptions or distractions.

-

Cons

-

Risk of malware or viruses

-

The main disadvantage of using Real Gangster Crime hack mod apk 2021 is that it poses a risk of malware or viruses on your device. Since the hack mod apk file is not from the official Google Play Store, it may contain malicious code or software that can harm your device or steal your personal information. You may also download fake or outdated files that do not work or cause problems in the game. Therefore, you should be careful when downloading and installing such files, and use a reputable antivirus or anti-malware software to scan them before installing them.

-

Risk of getting banned or losing progress

-

Another disadvantage of using Real Gangster Crime hack mod apk 2021 is that it may result in getting banned or losing progress in the game. Since the hack mod apk file is a modified version of the original game, it may violate the terms and conditions of the game developer or publisher. They may detect your use of such file and ban your account or device from accessing the game server. They may also delete your progress or data in the game as a penalty for cheating. Therefore, you should be aware of the consequences of using such file, and use it at your own risk.

-

Possible compatibility or performance issues

-

A third disadvantage of using Real Gangster Crime hack mod apk 2021 is that it may cause compatibility or performance issues on your device. Since the hack mod apk file is not optimized for all devices or versions of Android, it may not work properly on your device or cause crashes, glitches, or errors. It may also affect your device performance or battery life by consuming more resources or power. Therefore, you should check the compatibility and requirements of the file before downloading and installing it, and use it with caution.

-

Conclusion

-

Real Gangster Crime is an open-world action game that lets you become a gangster in the city of New Vegas. You can use a hack mod apk to enhance your gameplay experience by accessing features that are normally locked or limited, such as unlimited money, weapons, vehicles, missions, locations, and no ads. However, you should also be aware of the risks and drawbacks of using such file, such as malware or viruses, getting banned or losing progress, and compatibility or performance issues. Therefore, you should download and install Real Gangster Crime hack mod apk 2021 from a reliable source, enable unknown sources on your device, scan the file with an antivirus or anti-malware software, and use it at your own risk.

-

FAQs

-

Here are some frequently asked questions about Real Gangster Crime hack mod apk 2021:

-
    -
  1. Is Real Gangster Crime hack mod apk 2021 safe to use?
  2. -

    Real Gangster Crime hack mod apk 2021 is not completely safe to use, as it may contain malware or viruses that can harm your device or steal your personal information. It may also violate the terms and conditions of the game developer or publisher, and result in getting banned or losing progress in the game. Therefore, you should use it with caution and at your own risk.

    -
  3. Is Real Gangster Crime hack mod apk 2021 free to use?
  4. -

    Real Gangster Crime hack mod apk 2021 is free to use, as you don't have to pay any money to download or install it. However, you may need to watch some ads or complete some surveys on the website that provides the file, as a way of supporting them or proving that you are not a robot.

    -
  5. How can I update Real Gangster Crime hack mod apk 2021?
  6. -

    Real Gangster Crime hack mod apk 2021 may not update automatically, as it is not from the official Google Play Store. You may need to check the website that provides the file regularly for any updates or new versions. You may also need to uninstall the old version and install the new version manually.

    -
  7. Can I use Real Gangster Crime hack mod apk 2021 with other mods or cheats?
  8. -

    Real Gangster Crime hack mod apk 2021 may not work well with other mods or cheats, as they may conflict or interfere with each other. You may experience crashes, glitches, errors, or reduced performance in the game. Therefore, you should use only one mod or cheat at a time.

    -
  9. Can I play Real Gangster Crime hack mod apk 2021 online or offline?
  10. -

    Real Gangster Crime hack mod apk 2021 can be played both online and offline. However, you may need an internet connection to download and install the file, as well as to access some features or content in the game. You may also need an internet connection to play with other players or sync your progress with the game server.

    -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to play Taming io APK on your Android device.md b/spaces/congsaPfin/Manga-OCR/logs/How to play Taming io APK on your Android device.md deleted file mode 100644 index 6f25a600342206528f6f2b49b495a3519434948b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to play Taming io APK on your Android device.md +++ /dev/null @@ -1,108 +0,0 @@ - -

Taming.io APK: A Survival Game with Magical Pets

-

Do you love survival games with a twist? Do you want to explore a wild forest with your own magical pet? Do you want to craft items, build a base, and battle other players and their pets? If you answered yes to any of these questions, then you should try Taming.io, a new .io game that is fun, challenging, and addictive.

-

What is Taming.io?

-

Taming.io is a 2D survival game set in a wild forest. It is a classic .io game that is easy to play but hard to win. You control two characters at once - your human survivor and a pet of your choice. The game offers plenty of pets to choose from, each with its own characteristics, and the gameplay revolves around resource management, defense strategies, and exploration.

-

taming io apk


Download Zip » https://urlca.com/2uOgga



-

How to download and install Taming.io APK?

-

If you want to play Taming.io on your Android device, you need to download and install the APK file. Here are the steps to do it:

-
    -
  1. Go to [Taming io APK (Android Game) - Free Download - APKCombo](^3^) and click on the green "Download APK" button.
  2. -
  3. Wait for the download to finish and then open the file.
  4. -
  5. If you see a warning message that says "Install blocked", go to your device settings and enable "Unknown sources" under security options.
  6. -
  7. Tap on "Install" and wait for the installation to complete.
  8. -
  9. Enjoy playing Taming.io on your Android device!
  10. -
-

How to play Taming.io?

-

Taming.io is a multiplayer .io game, so you will share the world with other players. Your goal is to survive as long as possible and become the savviest survivor. Here are some tips on how to play the game:

-

taming io apk download
-taming io apk mod
-taming io apk latest version
-taming io apk android
-taming io apk free
-taming io apk 2023
-taming io apk update
-taming io apk offline
-taming io apk hack
-taming io apk unlimited money
-taming io apk for pc
-taming io apk online
-taming io apk no ads
-taming io apk full version
-taming io apk old version
-taming io apk beta
-taming io apk gameplay
-taming io apk review
-taming io apk tips and tricks
-taming io apk guide
-taming io apk cheats
-taming io apk codes
-taming io apk skins
-taming io apk pets
-taming io apk weapons
-taming io apk items
-taming io apk craft
-taming io apk base
-taming io apk map
-taming io apk server
-taming io apk multiplayer
-taming io apk pvp
-taming io apk pve
-taming io apk sandbox
-taming io apk survival
-taming io apk adventure
-taming io apk simulation
-taming io apk action
-taming io apk fun
-taming io apk cute
-taming io apk fantasy
-taming io apk magic
-taming io apk pixel art
-taming io apk 2d
-taming io apk 3d
-taming io apkpure
-taming.io apkmirror
-descargar Taming.io APK
-Tải Taming.io APK
-Taiming.io APK indir

-

Craft and upgrade

-

In the beginning, it’s just you and your pet. You should quickly harvest wood from trees to begin aging and gathering resources to build structures. Weapons are essential too. Each time you age, you’ll receive a selection of rewards that boost your capabilities as a survivor. These rewards can be anything from food items to recover health, to tools and weapons. Waste no time getting started. Start building your fortress and fortify it with walls and turrets. One of the most important buildings to unlock for your base is the windmill. This structure generates gold for you automatically.

-

Tame pets

-

The unique angle of Taming.io is the pet battles. You get to choose from a selection of pets at the start, but you can tame up to three wild animals too. Just approach any untamed animal and see what happens! Some animals will be friendly and join you, while others will be hostile and attack you. You can also feed animals food items to increase their loyalty. Your pets will fight by your side and help you defend your base, and you can switch between them by pressing the number keys.

-

Build a team

-

Since Taming.io is a multiplayer .io game, plenty of other gamers share the world with you. It's a good idea to get friendly with some of them and form a team, because going it alone is much harder against other teams and higher-level players. You can chat with other players using the Enter key, or use emojis by pressing the C key. You can also invite other players to join your team by pressing the T key. Once you have a team, you can share resources, protect each other, and attack other teams together.

-

Tips and tricks for Taming.io

-

Taming.io is a game that requires skill, strategy, and luck. Here are some tips and tricks to help you become a better player:

-
    -
  • Choose your pet wisely. Different pets have different stats and abilities. For example, the wolf is fast and agile, the bear is strong and durable, and the dragon is powerful and rare. You should pick a pet that suits your playstyle and strategy.
  • -
  • Use your pet's special ability. Each pet has a special ability that can be activated by pressing the spacebar. For example, the wolf can howl to stun enemies, the bear can roar to heal itself, and the dragon can breathe fire to deal massive damage. Use your pet's ability wisely and at the right time to gain an advantage.
  • -
  • Be aware of your surroundings. The forest is full of dangers and opportunities. You should always keep an eye on the minimap to see where resources, enemies, and allies are. You should also watch out for environmental hazards such as lava, spikes, and traps. Use the terrain to your advantage and avoid unnecessary risks.
  • -
  • Be flexible and adaptable. Taming.io is a dynamic game that changes constantly. You should be ready to change your strategy according to the situation. For example, you might need to switch pets, relocate your base, or join a different team depending on what happens in the game.
  • -
-

Reviews of Taming.io

-

Taming.io is a game that has received positive feedback from many players. Here are some of the reviews of the game from different sources:

- - - - - -
| Source | Review | Rating |
| --- | --- | --- |
| [Taming.io - Apps on Google Play] | "This game is awesome! I love how you can tame animals and fight with them. The graphics are cute and the gameplay is smooth. I recommend this game to anyone who likes survival games." | 5/5 |
| [Taming.io - Play Taming.io on Crazy Games] | "Taming.io is a fun and addictive .io game that combines survival, crafting, and pet battles. The game is easy to play but hard to master. There are many pets to choose from and many strategies to try. I really enjoy playing this game with my friends." | 4/5 |
| [Taming.io Review \| Games Finder] | "Taming.io is a unique .io game that offers a fresh take on the survival genre. The game is well-designed and balanced, with plenty of content and features to keep players engaged. The game is also very user-friendly and accessible, with simple controls and intuitive interface." | 4/5 |
-

As you can see, Taming.io is a game that has been praised for its originality, creativity, and fun factor. The game is suitable for players of all ages and preferences, as it offers a variety of options and modes to play.

-

Conclusion

-

Taming.io is a survival game with magical pets that you can download and play on your Android device. The game is a .io game that is easy to play but hard to win. The game is all about resource management, defense strategies, and exploration. You can choose from different pets, craft items, build a base, and battle other players and their pets. You can also team up with other players and form alliances. The game is fun, challenging, and addictive.

-

If you are looking for a new .io game to try out, you should give Taming.io a shot. You will not regret it!

-

Frequently Asked Questions

-
    -
  1. What are the requirements to play Taming.io on Android?
  2. -

    To play Taming.io on Android, you need an Android device with version 4.4 or higher, 100 MB of free storage space, and an internet connection.

    -
  3. How can I get more gold in Taming.io?
  4. -

    You can get more gold in Taming.io by building windmills in your base, killing enemies and their pets, harvesting resources, unlocking rewards as you age, or buying gold with real money.

    -
  5. How can I get more pets in Taming.io?
  6. -

    You can get more pets in Taming.io by taming wild animals in the forest or buying pets with real money. You can have up to three pets at a time, and you can switch between them by pressing the number keys.

    -
  7. How can I report bugs or give feedback on Taming.io?
  8. -

    You can report bugs or give feedback on Taming.io by contacting the developers through their email address: taming.io@gmail.com. You can also join their Discord server: [Taming.io] or follow their Twitter account: [@tamingio].

    -
  9. Is Taming.io safe and secure to play?
  10. -

    Yes, Taming.io is safe and secure to play. The game does not require any personal information or permissions from your device. The game also uses encryption and anti-cheat measures to protect your data and prevent hacking.

    -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Roblox APK Snrsz Robux ndir - Hileli Oyunlar Eyalar ve Daha Fazlas.md b/spaces/congsaPfin/Manga-OCR/logs/Roblox APK Snrsz Robux ndir - Hileli Oyunlar Eyalar ve Daha Fazlas.md deleted file mode 100644 index d0ae8b1dd446dde5f3156eb098767d1a32a05fa5..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Roblox APK Snrsz Robux ndir - Hileli Oyunlar Eyalar ve Daha Fazlas.md +++ /dev/null @@ -1,162 +0,0 @@ -
-
- What are Robux and what can you do with them?
- How to download Roblox APK for Android devices?
- How to get unlimited Robux for free using hacks and cheats?
- What are the benefits of having Robux in Roblox?
- Conclusion: Is it worth downloading Roblox APK sinirsiz robux indir?

| Section | Points to cover |
| --- | --- |
| H2: Introduction: What is Roblox and why is it popular? | Explain what Roblox is and how it works. Mention some of the features and genres of Roblox games. Highlight the popularity and user base of Roblox. |
| H2: What are Robux and what can you do with them? | Explain what Robux are and how they are used in Roblox. Mention some of the ways to earn or buy Robux. Highlight some of the items and experiences that require or enhance with Robux. |
| H2: How to download Roblox APK for Android devices? | Explain what an APK file is and why it is needed for Android devices. Provide a link to download the latest version of Roblox APK from a trusted source. Give instructions on how to install and run the Roblox APK on Android devices. |
| H2: How to get unlimited Robux for free using hacks and cheats? | Explain why some people want to get free Robux using hacks and cheats. Mention some of the common methods and tools for hacking or cheating on Roblox. Warn about the risks and consequences of using hacks and cheats on Roblox. |
| H2: What are the benefits of having Robux in Roblox? | Explain how having Robux can enhance the gameplay and creativity in Roblox. Mention some of the premium features and benefits that are exclusive to Robux users. Give examples of some of the best items and experiences that can be bought or accessed with Robux. |
| H2: Conclusion: Is it worth downloading Roblox APK sinirsiz robux indir? | Summarize the main points of the article. Give an opinion on whether downloading Roblox APK sinirsiz robux indir is a good idea or not. Provide some alternatives or suggestions for playing Roblox safely and legally. |

Table 2: Article with HTML formatting

Roblox APK Sinirsiz Robux Indir: How to Download and Play Roblox for Free with Unlimited Robux

-

If you are looking for a fun and creative way to spend your free time, you might want to check out Roblox, one of the most popular gaming platforms in the world. In this article, we will tell you everything you need to know about downloading and playing Roblox for free with unlimited robux, the in-game currency that can unlock many features and items in the game.

-

Introduction: What is Roblox and why is it popular?

-

Roblox is a platform where you can create, share, and play games with millions of people across an infinite variety of immersive, user-generated 3D worlds. You can choose from a growing library of experiences created by the community, or make your own using the powerful tools and resources provided by Roblox.

-

roblox apk sinirsiz robux indir


DOWNLOAD === https://urlca.com/2uOb9s



-

Some of the features and genres of Roblox games include:

-
    -
  • Action-adventure games, such as Jailbreak, where you can team up with other players to escape from prison or catch criminals.
  • -
  • Social games, such as Adopt Me!, where you can adopt pets, design your home, explore, role-play, and more.
  • -
  • Educational games, such as Lua Learning, where you can learn how to code in Lua, the scripting language used by Roblox.
  • -
  • Sandbox games, such as Build A Boat For Treasure, where you can build your own boat and sail it across a vast ocean.
  • -
  • Simulation games, such as Theme Park Tycoon 2, where you can build and manage your own theme park.
  • -
-

Roblox is popular because it offers endless possibilities for creativity, socialization, and entertainment. According to Roblox Corporation, the company behind the platform, Roblox has over 200 million monthly active users, and over 20 million experiences to explore. Roblox is also available on multiple devices, including Windows, Mac, iOS, Android, Xbox One, and Oculus Rift.

-

What are Robux and what can you do with them?

-

Robux are the in-game currency of Roblox. You can use them to buy items and accessories for your avatar, such as clothes, hats, hair, faces, and more. You can also use them to access premium features and benefits in some games, such as game passes, VIP servers, and developer products.

-

There are several ways to earn or buy Robux on Roblox. Some of them are:

-
    -
  • Completing surveys, offers, or tasks on third-party websites or apps that reward you with Robux.
  • -
  • Joining the Roblox Premium membership program, which gives you a monthly stipend of Robux and a 10% bonus when buying Robux.
  • -
  • Selling your own items or game passes on the Roblox Marketplace, where you can earn a percentage of the sales in Robux.
  • -
  • Creating popular games or experiences on Roblox, where you can earn Robux from players who spend them in your game.
  • -
-

Some of the items and experiences that require or enhance with Robux are:

-

roblox apk sinirsiz robux indir 2023
-roblox apk sinirsiz robux indir hileli
-roblox apk sinirsiz robux indir android
-roblox apk sinirsiz robux indir ios
-roblox apk sinirsiz robux indir bedava
-roblox apk sinirsiz robux indir nasıl yapılır
-roblox apk sinirsiz robux indir link
-roblox apk sinirsiz robux indir güncel
-roblox apk sinirsiz robux indir türkçe
-roblox apk sinirsiz robux indir pc
-roblox apk sinirsiz robux indir son sürüm
-roblox apk sinirsiz robux indir mediafire
-roblox apk sinirsiz robux indir mega
-roblox apk sinirsiz robux indir gerçek mi
-roblox apk sinirsiz robux indir yandex disk
-roblox apk sinirsiz robux indir youtube
-roblox apk sinirsiz robux indir oyun club
-roblox apk sinirsiz robux indir oyun indir club
-roblox apk sinirsiz robux indir oyunu
-roblox apk sinirsiz robux indir oyna
-roblox apk sinirsiz robux indir modlu
-roblox apk sinirsiz robux indir mod menu
-roblox apk sinirsiz robux indir mod apk
-roblox apk sinirsiz robux indir hackli
-roblox apk sinirsiz robux indir hackli 2023
-roblox apk sinirsiz robux indir hackli hileli
-roblox apk sinirsiz robux indir hackli android
-roblox apk sinirsiz robux indir hackli ios
-roblox apk sinirsiz robux indir hackli bedava
-roblox apk sinirsiz robux indir hackli nasıl yapılır
-roblox apk sinirsiz robux indir hackli link
-roblox apk sinirsiz robux indir hackli güncel
-roblox apk sinirsiz robux indir hackli türkçe
-roblox apk sinirsiz robux indir hackli pc
-roblox apk sinirsiz robux indir hackli son sürüm
-roblox apk sinirsiz robux indir hackli mediafire
-r

-
    -
  • Limited items, which are rare and unique items that have a fixed supply and can increase in value over time.
  • -
  • Game passes, which are special items that grant you access to certain features or perks in a game.
  • -
  • VIP servers, which are private servers that you can create and invite your friends to play in.
  • -
  • Developer products, which are consumable items that you can buy in a game, such as power-ups, weapons, or currency.
  • -
-

How to download Roblox APK for Android devices?

-

An APK file is an Android Package file that contains all the files and code needed to install and run an app on an Android device. You might need an APK file if you want to install an app that is not available on the Google Play Store, or if you want to get a newer or modified version of an app.
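
To make that concrete: an APK file is essentially a ZIP archive with a fixed layout, so you can peek inside one with Python's standard zipfile module. The snippet below is only an illustrative sketch and is not from the original article; "roblox.apk" is a placeholder path for whatever APK you downloaded.

```python
# Illustrative sketch: list the contents of an APK (an APK is a ZIP archive).
import zipfile

apk_path = "roblox.apk"  # placeholder path to a downloaded APK file

with zipfile.ZipFile(apk_path) as apk:
    names = apk.namelist()
    print(f"{len(names)} entries in the archive, for example:")
    for name in names[:10]:
        print(" ", name)

# Typical entries include AndroidManifest.xml (app metadata), classes.dex
# (compiled code), and res/ (images and other resources).
```

Listing the entries does not prove a file is safe, but it shows what "all the files and code" of an app actually means in practice.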

-

To download Roblox APK for Android devices, you can follow these steps:

-
    -
  1. Go to a trusted source that provides the latest version of Roblox APK. One of them is APKPure.com, where you can find the link to download Roblox APK sinirsiz robux indir.
  2. -
  3. Tap on the download button and wait for the file to be downloaded on your device.
  4. -
  5. Before installing the file, make sure you have enabled the option to install apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.
  6. -
  7. Locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the app to be installed.
  8. -
  9. Once the app is installed, you can open it and enjoy playing Roblox for free with unlimited robux.
  10. -

How to get unlimited Robux for free using hacks and cheats?

-

Some people might want to get free Robux using hacks and cheats because they want to save money, get more items, or have an advantage over other players. However, this is not a good idea, as it can have serious risks and consequences for your account and device.

-

Some of the common methods and tools for hacking or cheating on Roblox are:

-
    -
  • Modded APKs, which are modified versions of the original app that claim to give you unlimited Robux or other features.
  • -
  • Generators, which are websites or apps that claim to generate free Robux for you in exchange for your username, password, or verification.
  • -
  • Scripts, which are codes that you can run on your device or browser that claim to manipulate the game or give you free Robux.
  • -
-

Some of the risks and consequences of using hacks and cheats on Roblox are:

-
    -
  • Ban, which means your account will be permanently deleted or suspended by Roblox for violating the Terms of Service and the Community Guidelines.
  • -
  • Scam, which means you will lose your personal information, money, or items to hackers or scammers who trick you into using their hacks or cheats.
  • -
  • Virus, which means your device will be infected with malware or spyware that can harm your system, steal your data, or display unwanted ads.
  • -
-

Therefore, we strongly advise you to avoid using any hacks or cheats on Roblox, as they are illegal, unsafe, and unethical. Instead, you should play fair and earn Robux legitimately by following the methods we mentioned earlier.

-

What are the benefits of having Robux in Roblox?

-

Having Robux in Roblox can enhance your gameplay and creativity in many ways. You can use them to customize your avatar, access premium games and features, and support your favorite creators and developers. Here are some of the benefits of having Robux in Roblox:

-
    -
  • You can create your own unique style and personality by buying items and accessories for your avatar. You can also mix and match different items to create new outfits and looks.
  • -
  • You can enjoy more fun and immersive experiences by buying game passes, VIP servers, and developer products in some games. You can also unlock exclusive content and rewards that are only available to Robux users.
  • -
  • You can express your creativity and imagination by creating your own games and items on Roblox. You can also use Robux to advertise your creations and reach more players.
  • -
  • You can support the community and the platform by spending Robux on other people's games and items. You can also tip or donate Robux to your favorite creators and developers to show your appreciation.
  • -
-

Some of the best items and experiences that can be bought or accessed with Robux are:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Item/Experience | Description | Price |
| --- | --- | --- |
| Roblox Premium | A membership program that gives you a monthly stipend of Robux, a 10% bonus when buying Robux, access to premium games and features, and more. | $4.99 - $19.99 per month |
| Roblox Gift Cards | A physical or digital card that you can redeem for Robux or a premium subscription. You can also use them to buy exclusive items from the Roblox Catalog. | $10 - $50 per card |
| Roblox Studio | A free tool that allows you to create your own games and items on Roblox. You can also use it to test and publish your creations. | Free |
| Bloxburg | A popular game that simulates life in a fictional city. You can build your own house, work at different jobs, hang out with friends, and more. | 25 Robux per visit |
| MeepCity | A social game that lets you chat, play mini-games, adopt pets, decorate your house, and more. | Free (some items require Robux) |
| Royale High | A fantasy game that lets you attend a magical school, dress up as a princess or prince, go on quests, attend parties, and more. | Free (some items require Robux) |
| Murder Mystery 2 | A thriller game that lets you play as a murderer, a sheriff, or an innocent. You have to either kill everyone, stop the killer, or survive the round. | Free (some items require Robux) |
-

Conclusion: Is it worth downloading Roblox APK sinirsiz robux indir?

-

In conclusion, Roblox is a great platform for anyone who loves gaming, creating, and socializing. You can download and play Roblox for free with unlimited robux by using the Roblox APK sinirsiz robux indir file. However, you should be aware of the potential risks and consequences of using hacks and cheats on Roblox, as they can get you banned, scammed, or infected. Therefore, we recommend that you play Roblox safely and legally by using the official app from the Google Play Store or the Roblox website, and by earning or buying Robux legitimately.

-

If you have any questions or feedback about Roblox APK sinirsiz robux indir, feel free to leave a comment below. We hope you enjoyed this article and found it helpful. Happy gaming!

-

FAQs

-

Here are some of the frequently asked questions about Roblox APK sinirsiz robux indir:

-
    -
  1. Is Roblox APK sinirsiz robux indir safe to use?
    -Roblox APK sinirsiz robux indir is not an official app from Roblox Corporation, and it may contain viruses, malware, or spyware that can harm your device or steal your data. It may also violate the Terms of Service and the Community Guidelines of Roblox, and result in your account being banned or deleted. Therefore, we do not recommend using Roblox APK sinirsiz robux indir.
  2. -
  3. How can I update Roblox APK sinirsiz robux indir?
    -To update Roblox APK sinirsiz robux indir, you will need to download and install the latest version of the file from a trusted source. However, this may not work if the new version of Roblox has added new security measures or features that are incompatible with the modded app. Therefore, we suggest using the official app from the Google Play Store or the Roblox website instead.
  4. -
  5. Can I play Roblox on other devices besides Android?
    -Yes, you can play Roblox on other devices besides Android, such as Windows, Mac, iOS, Xbox One, and Oculus Rift. You can download the official app for each device from the Roblox website or the respective app store.
  6. -
  7. How can I contact Roblox support?
    -If you have any issues or problems with your account, game, or item on Roblox, you can contact Roblox support by filling out a form on their Help Center. You can also check out their FAQs, Blog, and Forums for more information and tips.
  8. -
  9. How can I learn more about Roblox?
    -If you want to learn more about Roblox, you can visit their About Us page, where you can find their history, mission, vision, values, team, and partners. You can also follow them on their social media channels, such as Facebook, Twitter, Instagram, YouTube, and TikTok.
  10. -

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Batman Arkham Asylum 1.0.2 Mac Native crack-torrent.rar 6 The Best Batman Game Ever Made Now Available for Mac Users.md b/spaces/contluForse/HuggingGPT/assets/Batman Arkham Asylum 1.0.2 Mac Native crack-torrent.rar 6 The Best Batman Game Ever Made Now Available for Mac Users.md deleted file mode 100644 index 715b45e1a967d3127139eb994c95ba1208a6be6f..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Batman Arkham Asylum 1.0.2 Mac Native crack-torrent.rar 6 The Best Batman Game Ever Made Now Available for Mac Users.md +++ /dev/null @@ -1,7 +0,0 @@ - -

amd radeon r9 m275x driver
hp photosmart c4700 series drivers
ios 8.4 beta 1
dell inspiron 546 drivers
minecraft pocket edition 0.14 0 apk
corsair void usb crackling
the crew wild run motorcycle
lenovo thinkpad x201 drivers
batman arkham knight screenshots
edimax br 6478ac v2
www realfootball 2009 com

dragon age origins camp storage

conexant audio driver windows 7

asrock 3tb+ unlocker utility

driver ricoh mp 4000

asus m5a88-v evo drivers

kb3189866 stuck at 45%

arma 2 operation arrowhead free download

panasonic kx mb781 drivers

apple mobile device ethernet

tsstcorp cddvdw sn 208ab
supreme kai trials dokkan
payday 2 titan safe
motorola 2247-n8
ats 8 arachnid review

-

batman arkham knight patch download
hp envy x360 m6 drivers
asus a68hm k drivers
sound blaster audigy fx control panel download
dead island riptide mod menu pc
emma watson hack reddit
transcend ts-rdf8k
realtek rtl8191se driver windows 7 toshiba
ts-h653 driver
network controller driver windows 8
panasonic tc-55cx850u

darksiders 2 deathinitive edition cheat table

kodi 17 beta 5

asrock fatal1ty fm2a88x+ killer

sony sxs card reader driver

broadcom bcm43142 driver windows 7

steelseries siberia v3 drivers

black ops 3 cybercore

mesut ozil fifa 16

lenovo thinkpad edge 15 drivers

yamaha receiver firmware update procedure
maximus ix hero drivers
ati firepro v7800 driver
atheros ethernet driver windows 7
intel centrino wireless n wimax 6150 driver windows 7 64 bit

-

Batman Arkham Asylum 1.0.2 Mac Native crack-torrent.rar 6


Download Ziphttps://ssurll.com/2uzwm5



-

gigabyte x58a ud3r manual
dell xps 8300 ethernet driver
cheat codes for batman arkham city ps3
netxtreme 57xx gigabit controller
nvidia gt 730 driver windows 7
windows sdk for windows server 2012 r2
logitech mx laser driver
preorder mean streets of gadgetzan
nvidia gtx 570 drivers
moultrie m880 firmware update
destiny crota hard mode

windows 10 build 10547 iso

gigabyte ga-ma785gm-us2h drivers

epson wf 5690 driver

asrock n68-s3 ucc

battlefield 3 tank destroyers

ati radeon hd 2600 xt driver windows 7 64 bit

avg free antivirus 2015 review

latitude e6420 bios a05

adventure capitalist black friday event

intel serial io driver what does it do
pioneer avh x2700bs update
logitech t-bb18
wow legendary mouse software
combat mission red thunder

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Descargar mini-kms activator v1.3 office2010.16 el activador que te permite usar Office 2010 sin lmites.md b/spaces/contluForse/HuggingGPT/assets/Descargar mini-kms activator v1.3 office2010.16 el activador que te permite usar Office 2010 sin lmites.md deleted file mode 100644 index 44e32c39d3a87ec73b9312785d555d470929b8a9..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Descargar mini-kms activator v1.3 office2010.16 el activador que te permite usar Office 2010 sin lmites.md +++ /dev/null @@ -1,5 +0,0 @@ - -

Descargar Mini-kms Activator V1.3 Office2010.16
DOWNLOAD: > mini kms activator office 2010 mega. mini-kms_activator_v1.2_office2010_vl_eng.exe descargar. descargar mini kms activator office 2010 gratis. descargar mini kms activator office 2010 gratis softonic. mini kms activator office 2010 descargar. mini-kms_activator_v1.3_office2010_vl_eng.exe descargar. mini-kms_activator_v1.1_office.2010.vl.eng descargar. descargar mini-kms activator 1.2 office 2010 gratis. descargar mini kms activator office 2010. descargar mini-kms_activator_v1.2_office2010_vl_eng.exe bffeec7b7e
download film Jai Vikranta movie

-

descargar mini-kms activator v1.3 office2010.16


Download Zip https://ssurll.com/2uzvBJ



aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Experience the Thrill of Mixed Martial Arts with Sultan The Sultan Full Movie HD in Hindi Free Download.md b/spaces/contluForse/HuggingGPT/assets/Experience the Thrill of Mixed Martial Arts with Sultan The Sultan Full Movie HD in Hindi Free Download.md deleted file mode 100644 index b5292671e148b14ede5325cb08f63dbf7c796470..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Experience the Thrill of Mixed Martial Arts with Sultan The Sultan Full Movie HD in Hindi Free Download.md +++ /dev/null @@ -1,6 +0,0 @@ - -

download Sultan Mithun Dharmendra unlimited Movies and videos Download Here. Sultan Mithun Dharmendra Hd, 3gp, mp4 320p and More Videos You Can Download Easily. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.

-

download Dharmendra Mithun Chakraborty Action Movie unlimited Movies and videos Download Here. Dharmendra Mithun Chakraborty Action Movie Hd, 3gp, mp4 320p and More Videos You Can Download Easily. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.

-

the Sultan full movie hd in hindi free download


DOWNLOAD –––––>>> https://ssurll.com/2uzwGT



aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/submodules/efficientnet_repo/onnx_optimize.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/submodules/efficientnet_repo/onnx_optimize.py deleted file mode 100644 index ee20bbf9f0f9473370489512eb96ca0b570b5388..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/submodules/efficientnet_repo/onnx_optimize.py +++ /dev/null @@ -1,84 +0,0 @@ -""" ONNX optimization script - -Run ONNX models through the optimizer to prune unneeded nodes, fuse batchnorm layers into conv, etc. - -NOTE: This isn't working consistently in recent PyTorch/ONNX combos (ie PyTorch 1.6 and ONNX 1.7), -it seems time to switch to using the onnxruntime online optimizer (can also be saved for offline). - -Copyright 2020 Ross Wightman -""" -import argparse -import warnings - -import onnx -from onnx import optimizer - - -parser = argparse.ArgumentParser(description="Optimize ONNX model") - -parser.add_argument("model", help="The ONNX model") -parser.add_argument("--output", required=True, help="The optimized model output filename") - - -def traverse_graph(graph, prefix=''): - content = [] - indent = prefix + ' ' - graphs = [] - num_nodes = 0 - for node in graph.node: - pn, gs = onnx.helper.printable_node(node, indent, subgraphs=True) - assert isinstance(gs, list) - content.append(pn) - graphs.extend(gs) - num_nodes += 1 - for g in graphs: - g_count, g_str = traverse_graph(g) - content.append('\n' + g_str) - num_nodes += g_count - return num_nodes, '\n'.join(content) - - -def main(): - args = parser.parse_args() - onnx_model = onnx.load(args.model) - num_original_nodes, original_graph_str = traverse_graph(onnx_model.graph) - - # Optimizer passes to perform - passes = [ - #'eliminate_deadend', - 'eliminate_identity', - 'eliminate_nop_dropout', - 'eliminate_nop_pad', - 'eliminate_nop_transpose', - 'eliminate_unused_initializer', - 'extract_constant_to_initializer', - 'fuse_add_bias_into_conv', - 'fuse_bn_into_conv', - 'fuse_consecutive_concats', - 'fuse_consecutive_reduce_unsqueeze', - 'fuse_consecutive_squeezes', - 'fuse_consecutive_transposes', - #'fuse_matmul_add_bias_into_gemm', - 'fuse_pad_into_conv', - #'fuse_transpose_into_gemm', - #'lift_lexical_references', - ] - - # Apply the optimization on the original serialized model - # WARNING I've had issues with optimizer in recent versions of PyTorch / ONNX causing - # 'duplicate definition of name' errors, see: https://github.com/onnx/onnx/issues/2401 - # It may be better to rely on onnxruntime optimizations, see onnx_validate.py script. - warnings.warn("I've had issues with optimizer in recent versions of PyTorch / ONNX." 
- "Try onnxruntime optimization if this doesn't work.") - optimized_model = optimizer.optimize(onnx_model, passes) - - num_optimized_nodes, optimzied_graph_str = traverse_graph(optimized_model.graph) - print('==> The model after optimization:\n{}\n'.format(optimzied_graph_str)) - print('==> The optimized model has {} nodes, the original had {}.'.format(num_optimized_nodes, num_original_nodes)) - - # Save the ONNX model - onnx.save(optimized_model, args.output) - - -if __name__ == "__main__": - main() diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/utils/memory.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/utils/memory.py deleted file mode 100644 index bd494780b9dbbd1571688cd270bb9b53d113c13e..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/utils/memory.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -from contextlib import contextmanager -from functools import wraps -import torch - -__all__ = ["retry_if_cuda_oom"] - - -@contextmanager -def _ignore_torch_cuda_oom(): - """ - A context which ignores CUDA OOM exception from pytorch. - """ - try: - yield - except RuntimeError as e: - # NOTE: the string may change? - if "CUDA out of memory. " in str(e): - pass - else: - raise - - -def retry_if_cuda_oom(func): - """ - Makes a function retry itself after encountering - pytorch's CUDA OOM error. - It will first retry after calling `torch.cuda.empty_cache()`. - - If that still fails, it will then retry by trying to convert inputs to CPUs. - In this case, it expects the function to dispatch to CPU implementation. - The return values may become CPU tensors as well and it's user's - responsibility to convert it back to CUDA tensor if needed. - - Args: - func: a stateless callable that takes tensor-like objects as arguments - - Returns: - a callable which retries `func` if OOM is encountered. - - Examples: - :: - output = retry_if_cuda_oom(some_torch_function)(input1, input2) - # output may be on CPU even if inputs are on GPU - - Note: - 1. When converting inputs to CPU, it will only look at each argument and check - if it has `.device` and `.to` for conversion. Nested structures of tensors - are not supported. - - 2. Since the function might be called more than once, it has to be - stateless. - """ - - def maybe_to_cpu(x): - try: - like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to") - except AttributeError: - like_gpu_tensor = False - if like_gpu_tensor: - return x.to(device="cpu") - else: - return x - - @wraps(func) - def wrapped(*args, **kwargs): - with _ignore_torch_cuda_oom(): - return func(*args, **kwargs) - - # Clear cache and retry - torch.cuda.empty_cache() - with _ignore_torch_cuda_oom(): - return func(*args, **kwargs) - - # Try on CPU. This slows down the code significantly, therefore print a notice. 
- logger = logging.getLogger(__name__) - logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func))) - new_args = (maybe_to_cpu(x) for x in args) - new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()} - return func(*new_args, **new_kwargs) - - return wrapped diff --git a/spaces/csuhan/opendet2/opendet2/modeling/roi_heads/__init__.py b/spaces/csuhan/opendet2/opendet2/modeling/roi_heads/__init__.py deleted file mode 100644 index 546b9183c1024978dd3a74e11b6cc53b43033187..0000000000000000000000000000000000000000 --- a/spaces/csuhan/opendet2/opendet2/modeling/roi_heads/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .roi_heads import OpenSetStandardROIHeads -from .box_head import FastRCNNSeparateConvFCHead, FastRCNNSeparateDropoutConvFCHead - -__all__ = list(globals().keys()) diff --git a/spaces/cvlab/zero123-live/taming-transformers/scripts/extract_depth.py b/spaces/cvlab/zero123-live/taming-transformers/scripts/extract_depth.py deleted file mode 100644 index d6aa0d80c63a3e580fa28e0f2c7af4e9ae003b64..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/taming-transformers/scripts/extract_depth.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -import torch -import numpy as np -from tqdm import trange -from PIL import Image - - -def get_state(gpu): - import torch - midas = torch.hub.load("intel-isl/MiDaS", "MiDaS") - if gpu: - midas.cuda() - midas.eval() - - midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") - transform = midas_transforms.default_transform - - state = {"model": midas, - "transform": transform} - return state - - -def depth_to_rgba(x): - assert x.dtype == np.float32 - assert len(x.shape) == 2 - y = x.copy() - y.dtype = np.uint8 - y = y.reshape(x.shape+(4,)) - return np.ascontiguousarray(y) - - -def rgba_to_depth(x): - assert x.dtype == np.uint8 - assert len(x.shape) == 3 and x.shape[2] == 4 - y = x.copy() - y.dtype = np.float32 - y = y.reshape(x.shape[:2]) - return np.ascontiguousarray(y) - - -def run(x, state): - model = state["model"] - transform = state["transform"] - hw = x.shape[:2] - with torch.no_grad(): - prediction = model(transform((x + 1.0) * 127.5).cuda()) - prediction = torch.nn.functional.interpolate( - prediction.unsqueeze(1), - size=hw, - mode="bicubic", - align_corners=False, - ).squeeze() - output = prediction.cpu().numpy() - return output - - -def get_filename(relpath, level=-2): - # save class folder structure and filename: - fn = relpath.split(os.sep)[level:] - folder = fn[-2] - file = fn[-1].split('.')[0] - return folder, file - - -def save_depth(dataset, path, debug=False): - os.makedirs(path) - N = len(dset) - if debug: - N = 10 - state = get_state(gpu=True) - for idx in trange(N, desc="Data"): - ex = dataset[idx] - image, relpath = ex["image"], ex["relpath"] - folder, filename = get_filename(relpath) - # prepare - folderabspath = os.path.join(path, folder) - os.makedirs(folderabspath, exist_ok=True) - savepath = os.path.join(folderabspath, filename) - # run model - xout = run(image, state) - I = depth_to_rgba(xout) - Image.fromarray(I).save("{}.png".format(savepath)) - - -if __name__ == "__main__": - from taming.data.imagenet import ImageNetTrain, ImageNetValidation - out = "data/imagenet_depth" - if not os.path.exists(out): - print("Please create a folder or symlink '{}' to extract depth data ".format(out) + - "(be prepared that the output size will be larger than ImageNet itself).") - exit(1) - - # go - dset = ImageNetValidation() - abspath = os.path.join(out, "val") - if 
os.path.exists(abspath): - print("{} exists - not doing anything.".format(abspath)) - else: - print("preparing {}".format(abspath)) - save_depth(dset, abspath) - print("done with validation split") - - dset = ImageNetTrain() - abspath = os.path.join(out, "train") - if os.path.exists(abspath): - print("{} exists - not doing anything.".format(abspath)) - else: - print("preparing {}".format(abspath)) - save_depth(dset, abspath) - print("done with train split") - - print("done done.") diff --git a/spaces/danijelpetkovic/test-tts-inference-api/README.md b/spaces/danijelpetkovic/test-tts-inference-api/README.md deleted file mode 100644 index 0079efeed6e625b47367f6b21f8ca4f9da003e0d..0000000000000000000000000000000000000000 --- a/spaces/danijelpetkovic/test-tts-inference-api/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Test Tts Inference Api -emoji: 🌍 -colorFrom: gray -colorTo: green -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/daveckw/prompt-2-sd/app.py b/spaces/daveckw/prompt-2-sd/app.py deleted file mode 100644 index 040b4b96fde3f76330d5a4f3d18458455c891815..0000000000000000000000000000000000000000 --- a/spaces/daveckw/prompt-2-sd/app.py +++ /dev/null @@ -1,133 +0,0 @@ -import requests -from PIL import Image -import os -import io -import base64 -from pathlib import Path -import gradio as gr - -# parameters which can be customized in settings.json of webui - -initial_width = 512 -initial_height = 512 - -params = { - "enable_SD_api": True, - "address": "https://dd31-2001-d08-e3-56b9-d182-69e5-40c4-e46d.ngrok-free.app", - "save_img": True, - "SD_model": "Chilloutmix-Ni-prune-fp32-fix.safetensors", - "prompt_prefix": "detailed portrait of a asian man in black jacket, Futuristic sci-fi fashion, fine details, realistic shaded, fine-face, pretty face cyberpunk, neotokyo, synthwave, aesthetics, futuristic, low-emission-neon, (blade runner movie scene)", - "negative_prompt": "jpeg artifacts, low quality, lowres, 3d, render, doll, plastic, blur, haze, monochrome, b&w, text, (ugly:1.2), unclear eyes, no arms, bad anatomy, cropped, censoring, asymmetric eyes, bad anatomy, bad proportions, cropped, cross-eyed, deformed, extra arms, extra fingers, extra limbs, fused fingers, malformed, mangled hands, misshapen body, missing arms, missing fingers, missing hands, missing legs, poorly drawn, tentacle finger, too many arms, too many fingers, watermark, logo, text, letters, signature, username, words, blurry, cropped", - "width": initial_width, - "height": initial_height, - "restore_faces": False, -} - -pic_id = 0 - - -# Get and save the Stable Diffusion-generated picture -def get_SD_pictures(description): - global params, pic_id - payload = { - "prompt": params["prompt_prefix"] + description, - 
"seed": -1, - "sampler_name": "Euler a", - "steps": 20, - "cfg_scale": 7, - "width": params["width"], - "height": params["height"], - "restore_faces": params["restore_faces"], - "negative_prompt": params["negative_prompt"], - } - - try: - response = requests.post( - url=f'{params["address"]}/sdapi/v1/txt2img', json=payload, timeout=100 - ) - response.raise_for_status() # Raises stored HTTPError, if one occurred - except requests.exceptions.HTTPError as http_err: - return f"HTTP error occurred: {http_err}" - except Exception as err: - return f"An error occurred: {err}" - - r = response.json() - - visible_result = "" - for img_str in r["images"]: - image = Image.open(io.BytesIO(base64.b64decode(img_str.split(",", 1)[0]))) - - if not os.path.exists("outputs"): - os.makedirs("outputs") - - if params["save_img"]: - output_file = Path(f"outputs/{pic_id:06d}.png") - image.save(output_file.as_posix()) - pic_id += 1 - - # lower the resolution of received images for the chat - # otherwise the log size gets out of control quickly - # with all the base64 values in visible history - image.thumbnail((512, 512)) - buffered = io.BytesIO() - image.save(buffered, format="JPEG") - buffered.seek(0) - image_bytes = buffered.getvalue() - img_str = "data:image/jpeg;base64," + base64.b64encode(image_bytes).decode() - visible_result = visible_result + f'{description}\n' - - return visible_result - - -def display_image(description: str, artist: str) -> gr.Image: - if artist == "Kashif": - description += " " - elif artist == "Daniel Ho": - description += " " - elif artist == "Adrian": - description += " " - elif artist == "Tony": - description += " " - elif artist == "Kevin": - description += " " - elif artist == "Aaron": - description += " " - - if artist == "none": - artist = False - - if artist: - params["width"] = 768 - params["height"] = 768 - params[ - "prompt_prefix" - ] = "detailed portrait of a asian man in black jacket, Futuristic sci-fi fashion, fine details, realistic shaded, fine-face, pretty face cyberpunk, neotokyo, synthwave, aesthetics, futuristic, low-emission-neon, (blade runner movie scene)" - else: - params["width"] = initial_width - params["height"] = initial_height - params["prompt_prefix"] = "4k" - - visible_result = get_SD_pictures(description) - if "error occurred" in visible_result: - return visible_result - - image_data = base64.b64decode(visible_result.split(",", 1)[1]) - image = Image.open(io.BytesIO(image_data)) - return image - - -inputs = [ - gr.inputs.Textbox(lines=2, label="Trial period has ended. 
Follow daveckw instagram for future updates"), - gr.inputs.Radio( - ["Kashif", "Daniel Ho", "Adrian", "Tony", "Kevin", "Aaron", "none"], - label="Artist", - ), -] - -outputs = gr.outputs.Image(type="pil", label="Generated Image") - -interface = gr.Interface( - fn=display_image, inputs=inputs, outputs=outputs, title="DC Image Generator" -) - -interface.launch() diff --git a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/latent_diffusion/ema.py b/spaces/dawood/audioldm-text-to-audio-generation/audioldm/latent_diffusion/ema.py deleted file mode 100644 index 192b012186bab3d8a5380bc9b891da8eef0fd9fa..0000000000000000000000000000000000000000 --- a/spaces/dawood/audioldm-text-to-audio-generation/audioldm/latent_diffusion/ema.py +++ /dev/null @@ -1,81 +0,0 @@ -import torch -from torch import nn - -class LitEma(nn.Module): - def __init__(self, model, decay=0.9999, use_num_upates=True): - super().__init__() - if decay < 0.0 or decay > 1.0: - raise ValueError("Decay must be between 0 and 1") - - self.m_name2s_name = {} - self.register_buffer("decay", torch.tensor(decay, dtype=torch.float32)) - self.register_buffer( - "num_updates", - torch.tensor(0, dtype=torch.int) - if use_num_upates - else torch.tensor(-1, dtype=torch.int), - ) - - for name, p in model.named_parameters(): - if p.requires_grad: - # remove as '.'-character is not allowed in buffers - s_name = name.replace(".", "") - self.m_name2s_name.update({name: s_name}) - self.register_buffer(s_name, p.clone().detach().data) - - self.collected_params = [] - - def forward(self, model): - decay = self.decay - - if self.num_updates >= 0: - self.num_updates += 1 - decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates)) - - one_minus_decay = 1.0 - decay - - with torch.no_grad(): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - - for key in m_param: - if m_param[key].requires_grad: - sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) - shadow_params[sname].sub_( - one_minus_decay * (shadow_params[sname] - m_param[key]) - ) - else: - assert not key in self.m_name2s_name - - def copy_to(self, model): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - for key in m_param: - if m_param[key].requires_grad: - m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) - else: - assert not key in self.m_name2s_name - - def store(self, parameters): - """ - Save the current parameters for restoring later. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. - """ - self.collected_params = [param.clone() for param in parameters] - - def restore(self, parameters): - """ - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. After validation (or model saving), use this to - restore the former parameters. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. 
- """ - for c_param, param in zip(self.collected_params, parameters): - param.data.copy_(c_param.data) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/__init__.py deleted file mode 100644 index 7cfa792f744b7e0b4e28a536c0603f142ded6518..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/attr/__init__.py +++ /dev/null @@ -1,132 +0,0 @@ -# SPDX-License-Identifier: MIT - -""" -Classes Without Boilerplate -""" - -from functools import partial -from typing import Callable - -from . import converters, exceptions, filters, setters, validators -from ._cmp import cmp_using -from ._config import get_run_validators, set_run_validators -from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types -from ._make import ( - NOTHING, - Attribute, - Factory, - attrib, - attrs, - fields, - fields_dict, - make_class, - validate, -) -from ._next_gen import define, field, frozen, mutable -from ._version_info import VersionInfo - - -s = attributes = attrs -ib = attr = attrib -dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) - - -class AttrsInstance: - pass - - -__all__ = [ - "Attribute", - "AttrsInstance", - "Factory", - "NOTHING", - "asdict", - "assoc", - "astuple", - "attr", - "attrib", - "attributes", - "attrs", - "cmp_using", - "converters", - "define", - "evolve", - "exceptions", - "field", - "fields", - "fields_dict", - "filters", - "frozen", - "get_run_validators", - "has", - "ib", - "make_class", - "mutable", - "resolve_types", - "s", - "set_run_validators", - "setters", - "validate", - "validators", -] - - -def _make_getattr(mod_name: str) -> Callable: - """ - Create a metadata proxy for packaging information that uses *mod_name* in - its warnings and errors. - """ - - def __getattr__(name: str) -> str: - dunder_to_metadata = { - "__title__": "Name", - "__copyright__": "", - "__version__": "version", - "__version_info__": "version", - "__description__": "summary", - "__uri__": "", - "__url__": "", - "__author__": "", - "__email__": "", - "__license__": "license", - } - if name not in dunder_to_metadata.keys(): - raise AttributeError(f"module {mod_name} has no attribute {name}") - - import sys - import warnings - - if sys.version_info < (3, 8): - from importlib_metadata import metadata - else: - from importlib.metadata import metadata - - if name != "__version_info__": - warnings.warn( - f"Accessing {mod_name}.{name} is deprecated and will be " - "removed in a future release. 
Use importlib.metadata directly " - "to query for attrs's packaging metadata.", - DeprecationWarning, - stacklevel=2, - ) - - meta = metadata("attrs") - if name == "__license__": - return "MIT" - elif name == "__copyright__": - return "Copyright (c) 2015 Hynek Schlawack" - elif name in ("__uri__", "__url__"): - return meta["Project-URL"].split(" ", 1)[-1] - elif name == "__version_info__": - return VersionInfo._from_version_string(meta["version"]) - elif name == "__author__": - return meta["Author-email"].rsplit(" ", 1)[0] - elif name == "__email__": - return meta["Author-email"].rsplit("<", 1)[1][:-1] - - return meta[dunder_to_metadata[name]] - - return __getattr__ - - -__getattr__ = _make_getattr(__name__) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/varLib/instancer/featureVars.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/varLib/instancer/featureVars.py deleted file mode 100644 index d9370d9d653aa847a0ada1fae2c3869b66fa27af..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/varLib/instancer/featureVars.py +++ /dev/null @@ -1,190 +0,0 @@ -from fontTools.ttLib.tables import otTables as ot -from copy import deepcopy -import logging - - -log = logging.getLogger("fontTools.varLib.instancer") - - -def _featureVariationRecordIsUnique(rec, seen): - conditionSet = [] - conditionSets = ( - rec.ConditionSet.ConditionTable if rec.ConditionSet is not None else [] - ) - for cond in conditionSets: - if cond.Format != 1: - # can't tell whether this is duplicate, assume is unique - return True - conditionSet.append( - (cond.AxisIndex, cond.FilterRangeMinValue, cond.FilterRangeMaxValue) - ) - # besides the set of conditions, we also include the FeatureTableSubstitution - # version to identify unique FeatureVariationRecords, even though only one - # version is currently defined. It's theoretically possible that multiple - # records with same conditions but different substitution table version be - # present in the same font for backward compatibility. 
- recordKey = frozenset([rec.FeatureTableSubstitution.Version] + conditionSet) - if recordKey in seen: - return False - else: - seen.add(recordKey) # side effect - return True - - -def _limitFeatureVariationConditionRange(condition, axisLimit): - minValue = condition.FilterRangeMinValue - maxValue = condition.FilterRangeMaxValue - - if ( - minValue > maxValue - or minValue > axisLimit.maximum - or maxValue < axisLimit.minimum - ): - # condition invalid or out of range - return - - return tuple( - axisLimit.renormalizeValue(v, extrapolate=False) for v in (minValue, maxValue) - ) - - -def _instantiateFeatureVariationRecord( - record, recIdx, axisLimits, fvarAxes, axisIndexMap -): - applies = True - shouldKeep = False - newConditions = [] - from fontTools.varLib.instancer import NormalizedAxisTripleAndDistances - - default_triple = NormalizedAxisTripleAndDistances(-1, 0, +1) - if record.ConditionSet is None: - record.ConditionSet = ot.ConditionSet() - record.ConditionSet.ConditionTable = [] - record.ConditionSet.ConditionCount = 0 - for i, condition in enumerate(record.ConditionSet.ConditionTable): - if condition.Format == 1: - axisIdx = condition.AxisIndex - axisTag = fvarAxes[axisIdx].axisTag - - minValue = condition.FilterRangeMinValue - maxValue = condition.FilterRangeMaxValue - triple = axisLimits.get(axisTag, default_triple) - - if not (minValue <= triple.default <= maxValue): - applies = False - - # if condition not met, remove entire record - if triple.minimum > maxValue or triple.maximum < minValue: - newConditions = None - break - - if axisTag in axisIndexMap: - # remap axis index - condition.AxisIndex = axisIndexMap[axisTag] - - # remap condition limits - newRange = _limitFeatureVariationConditionRange(condition, triple) - if newRange: - # keep condition with updated limits - minimum, maximum = newRange - condition.FilterRangeMinValue = minimum - condition.FilterRangeMaxValue = maximum - shouldKeep = True - if minimum != -1 or maximum != +1: - newConditions.append(condition) - else: - # condition out of range, remove entire record - newConditions = None - break - - else: - log.warning( - "Condition table {0} of FeatureVariationRecord {1} has " - "unsupported format ({2}); ignored".format(i, recIdx, condition.Format) - ) - applies = False - newConditions.append(condition) - - if newConditions is not None and shouldKeep: - record.ConditionSet.ConditionTable = newConditions - if not newConditions: - record.ConditionSet = None - shouldKeep = True - else: - shouldKeep = False - - # Does this *always* apply? 
- universal = shouldKeep and not newConditions - - return applies, shouldKeep, universal - - -def _instantiateFeatureVariations(table, fvarAxes, axisLimits): - pinnedAxes = set(axisLimits.pinnedLocation()) - axisOrder = [axis.axisTag for axis in fvarAxes if axis.axisTag not in pinnedAxes] - axisIndexMap = {axisTag: axisOrder.index(axisTag) for axisTag in axisOrder} - - featureVariationApplied = False - uniqueRecords = set() - newRecords = [] - defaultsSubsts = None - - for i, record in enumerate(table.FeatureVariations.FeatureVariationRecord): - applies, shouldKeep, universal = _instantiateFeatureVariationRecord( - record, i, axisLimits, fvarAxes, axisIndexMap - ) - - if shouldKeep and _featureVariationRecordIsUnique(record, uniqueRecords): - newRecords.append(record) - - if applies and not featureVariationApplied: - assert record.FeatureTableSubstitution.Version == 0x00010000 - defaultsSubsts = deepcopy(record.FeatureTableSubstitution) - for default, rec in zip( - defaultsSubsts.SubstitutionRecord, - record.FeatureTableSubstitution.SubstitutionRecord, - ): - default.Feature = deepcopy( - table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature - ) - table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = deepcopy( - rec.Feature - ) - # Set variations only once - featureVariationApplied = True - - # Further records don't have a chance to apply after a universal record - if universal: - break - - # Insert a catch-all record to reinstate the old features if necessary - if featureVariationApplied and newRecords and not universal: - defaultRecord = ot.FeatureVariationRecord() - defaultRecord.ConditionSet = ot.ConditionSet() - defaultRecord.ConditionSet.ConditionTable = [] - defaultRecord.ConditionSet.ConditionCount = 0 - defaultRecord.FeatureTableSubstitution = defaultsSubsts - - newRecords.append(defaultRecord) - - if newRecords: - table.FeatureVariations.FeatureVariationRecord = newRecords - table.FeatureVariations.FeatureVariationCount = len(newRecords) - else: - del table.FeatureVariations - # downgrade table version if there are no FeatureVariations left - table.Version = 0x00010000 - - -def instantiateFeatureVariations(varfont, axisLimits): - for tableTag in ("GPOS", "GSUB"): - if tableTag not in varfont or not getattr( - varfont[tableTag].table, "FeatureVariations", None - ): - continue - log.info("Instantiating FeatureVariations of %s table", tableTag) - _instantiateFeatureVariations( - varfont[tableTag].table, varfont["fvar"].axes, axisLimits - ) - # remove unreferenced lookups - varfont[tableTag].prune_lookups() diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/networking.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/networking.py deleted file mode 100644 index 83549a3c4d40aebf3bcfafa343486aa0a2848333..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/networking.py +++ /dev/null @@ -1,208 +0,0 @@ -""" -Defines helper methods useful for setting up ports, launching servers, and -creating tunnels. -""" -from __future__ import annotations - -import os -import socket -import threading -import time -import warnings -from typing import TYPE_CHECKING - -import requests -import uvicorn - -from gradio.exceptions import ServerFailedToStartError -from gradio.routes import App -from gradio.tunneling import Tunnel - -if TYPE_CHECKING: # Only import for type checking (to avoid circular imports). 
- from gradio.blocks import Blocks - -# By default, the local server will try to open on localhost, port 7860. -# If that is not available, then it will try 7861, 7862, ... 7959. -INITIAL_PORT_VALUE = int(os.getenv("GRADIO_SERVER_PORT", "7860")) -TRY_NUM_PORTS = int(os.getenv("GRADIO_NUM_PORTS", "100")) -LOCALHOST_NAME = os.getenv("GRADIO_SERVER_NAME", "127.0.0.1") -GRADIO_API_SERVER = "https://api.gradio.app/v2/tunnel-request" - - -class Server(uvicorn.Server): - def install_signal_handlers(self): - pass - - def run_in_thread(self): - self.thread = threading.Thread(target=self.run, daemon=True) - self.thread.start() - start = time.time() - while not self.started: - time.sleep(1e-3) - if time.time() - start > 5: - raise ServerFailedToStartError( - "Server failed to start. Please check that the port is available." - ) - - def close(self): - self.should_exit = True - self.thread.join() - - -def get_first_available_port(initial: int, final: int) -> int: - """ - Gets the first open port in a specified range of port numbers - Parameters: - initial: the initial value in the range of port numbers - final: final (exclusive) value in the range of port numbers, should be greater than `initial` - Returns: - port: the first open port in the range - """ - for port in range(initial, final): - try: - s = socket.socket() # create a socket object - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - s.bind((LOCALHOST_NAME, port)) # Bind to the port - s.close() - return port - except OSError: - pass - raise OSError( - f"All ports from {initial} to {final - 1} are in use. Please close a port." - ) - - -def configure_app(app: App, blocks: Blocks) -> App: - auth = blocks.auth - if auth is not None: - if not callable(auth): - app.auth = {account[0]: account[1] for account in auth} - else: - app.auth = auth - else: - app.auth = None - app.blocks = blocks - app.cwd = os.getcwd() - app.favicon_path = blocks.favicon_path - app.tokens = {} - return app - - -def start_server( - blocks: Blocks, - server_name: str | None = None, - server_port: int | None = None, - ssl_keyfile: str | None = None, - ssl_certfile: str | None = None, - ssl_keyfile_password: str | None = None, - app_kwargs: dict | None = None, -) -> tuple[str, int, str, App, Server]: - """Launches a local server running the provided Interface - Parameters: - blocks: The Blocks object to run on the server - server_name: to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME. - server_port: will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT. - auth: If provided, username and password (or list of username-password tuples) required to access the Blocks. Can also provide function that takes username and password and returns True if valid login. - ssl_keyfile: If a path to a file is provided, will use this as the private key file to create a local server running on https. - ssl_certfile: If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided. - ssl_keyfile_password: If a password is provided, will use this with the ssl certificate for https. - app_kwargs: Additional keyword arguments to pass to the gradio.routes.App constructor. 
- - Returns: - port: the port number the server is running on - path_to_local_server: the complete address that the local server can be accessed at - app: the FastAPI app object - server: the server object that is a subclass of uvicorn.Server (used to close the server) - """ - if ssl_keyfile is not None and ssl_certfile is None: - raise ValueError("ssl_certfile must be provided if ssl_keyfile is provided.") - - server_name = server_name or LOCALHOST_NAME - url_host_name = "localhost" if server_name == "0.0.0.0" else server_name - - # Strip IPv6 brackets from the address if they exist. - # This is needed as http://[::1]:port/ is a valid browser address, - # but not a valid IPv6 address, so asyncio will throw an exception. - if server_name.startswith("[") and server_name.endswith("]"): - host = server_name[1:-1] - else: - host = server_name - - app = App.create_app(blocks, app_kwargs=app_kwargs) - - server_ports = ( - [server_port] - if server_port is not None - else range(INITIAL_PORT_VALUE, INITIAL_PORT_VALUE + TRY_NUM_PORTS) - ) - - for port in server_ports: - try: - # The fastest way to check if a port is available is to try to bind to it with socket. - # If the port is not available, socket will throw an OSError. - s = socket.socket() - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - # Really, we should be checking if (server_name, server_port) is available, but - # socket.bind() doesn't seem to throw an OSError with ipv6 addresses, based on my testing. - # Instead, we just check if the port is available on localhost. - s.bind((LOCALHOST_NAME, port)) - s.close() - - # To avoid race conditions, so we also check if the port by trying to start the uvicorn server. - # If the port is not available, this will throw a ServerFailedToStartError. - config = uvicorn.Config( - app=app, - port=port, - host=host, - log_level="warning", - ssl_keyfile=ssl_keyfile, - ssl_certfile=ssl_certfile, - ssl_keyfile_password=ssl_keyfile_password, - ws_max_size=1024 * 1024 * 1024, # Setting max websocket size to be 1 GB - ) - server = Server(config=config) - server.run_in_thread() - break - except (OSError, ServerFailedToStartError): - pass - else: - raise OSError( - f"Cannot find empty port in range: {min(server_ports)}-{max(server_ports)}. You can specify a different port by setting the GRADIO_SERVER_PORT environment variable or passing the `server_port` parameter to `launch()`." 
- ) - - if ssl_keyfile is not None: - path_to_local_server = f"https://{url_host_name}:{port}/" - else: - path_to_local_server = f"http://{url_host_name}:{port}/" - - return server_name, port, path_to_local_server, app, server - - -def setup_tunnel(local_host: str, local_port: int, share_token: str) -> str: - response = requests.get(GRADIO_API_SERVER) - if response and response.status_code == 200: - try: - payload = response.json()[0] - remote_host, remote_port = payload["host"], int(payload["port"]) - tunnel = Tunnel( - remote_host, remote_port, local_host, local_port, share_token - ) - address = tunnel.start_tunnel() - return address - except Exception as e: - raise RuntimeError(str(e)) from e - raise RuntimeError("Could not get share link from Gradio API Server.") - - -def url_ok(url: str) -> bool: - try: - for _ in range(5): - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - r = requests.head(url, timeout=3, verify=False) - if r.status_code in (200, 401, 302): # 401 or 302 if auth is set - return True - time.sleep(0.500) - except (ConnectionError, requests.exceptions.ConnectionError): - return False - return False diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-65e780bb.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-65e780bb.js deleted file mode 100644 index 6c286a2e85998c3c4a6e84de80f937ae4a2d625d..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-65e780bb.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as w,e as L,s as M,m as T,g as o,Y as c,h as r,n as S,k as h,C as j,F as v,G as d,w as b,u as k,H,V as C,ae as B,o as E,Q as q,R as z,E as D}from"./index-39fce9e2.js";import{B as F}from"./Button-79f6e3bf.js";function G(t){let e,n;return{c(){e=T("div"),o(e,"class",n="prose "+t[1].join(" ")+" svelte-1ybaih5"),o(e,"id",t[0]),c(e,"min",t[4]),c(e,"hide",!t[3])},m(s,l){r(s,e,l),e.innerHTML=t[2]},p(s,[l]){l&4&&(e.innerHTML=s[2]),l&2&&n!==(n="prose "+s[1].join(" ")+" svelte-1ybaih5")&&o(e,"class",n),l&1&&o(e,"id",s[0]),l&18&&c(e,"min",s[4]),l&10&&c(e,"hide",!s[3])},i:S,o:S,d(s){s&&h(e)}}}function Q(t,e,n){let{elem_id:s=""}=e,{elem_classes:l=[]}=e,{value:m}=e,{visible:u=!0}=e,{min_height:f=!1}=e;const i=j();return t.$$set=a=>{"elem_id"in a&&n(0,s=a.elem_id),"elem_classes"in a&&n(1,l=a.elem_classes),"value"in a&&n(2,m=a.value),"visible"in a&&n(3,u=a.visible),"min_height"in a&&n(4,f=a.min_height)},t.$$.update=()=>{t.$$.dirty&4&&i("change")},[s,l,m,u,f]}class R extends w{constructor(e){super(),L(this,e,Q,G,M,{elem_id:0,elem_classes:1,value:2,visible:3,min_height:4})}}function V(t){let e,n,s,l,m;const u=[t[4],{variant:"center"}];let f={};for(let i=0;i{"label"in _&&n(5,s=_.label),"elem_id"in _&&n(0,l=_.elem_id),"elem_classes"in _&&n(1,m=_.elem_classes),"visible"in _&&n(2,u=_.visible),"value"in _&&n(3,f=_.value),"loading_status"in _&&n(4,i=_.loading_status)},t.$$.update=()=>{t.$$.dirty&32&&a("change")},[l,m,u,f,i,s,g]}class I extends w{constructor(e){super(),L(this,e,A,Y,M,{label:5,elem_id:0,elem_classes:1,visible:2,value:3,loading_status:4})}}const N=I,O=["static"];export{N as Component,O as modes}; -//# sourceMappingURL=index-65e780bb.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/__init__.py 
b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/__init__.py deleted file mode 100644 index da95f8d0bb6bf7c91713dddc9615873d5bf268bc..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/httpcore/__init__.py +++ /dev/null @@ -1,139 +0,0 @@ -from ._api import request, stream -from ._async import ( - AsyncConnectionInterface, - AsyncConnectionPool, - AsyncHTTP2Connection, - AsyncHTTP11Connection, - AsyncHTTPConnection, - AsyncHTTPProxy, - AsyncSOCKSProxy, -) -from ._backends.base import ( - SOCKET_OPTION, - AsyncNetworkBackend, - AsyncNetworkStream, - NetworkBackend, - NetworkStream, -) -from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream -from ._backends.sync import SyncBackend -from ._exceptions import ( - ConnectError, - ConnectionNotAvailable, - ConnectTimeout, - LocalProtocolError, - NetworkError, - PoolTimeout, - ProtocolError, - ProxyError, - ReadError, - ReadTimeout, - RemoteProtocolError, - TimeoutException, - UnsupportedProtocol, - WriteError, - WriteTimeout, -) -from ._models import URL, Origin, Request, Response -from ._ssl import default_ssl_context -from ._sync import ( - ConnectionInterface, - ConnectionPool, - HTTP2Connection, - HTTP11Connection, - HTTPConnection, - HTTPProxy, - SOCKSProxy, -) - -# The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed. -try: - from ._backends.anyio import AnyIOBackend -except ImportError: # pragma: nocover - - class AnyIOBackend: # type: ignore - def __init__(self, *args, **kwargs): # type: ignore - msg = ( - "Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed." - ) - raise RuntimeError(msg) - - -# The 'httpcore.TrioBackend' class is conditional on 'trio' being installed. -try: - from ._backends.trio import TrioBackend -except ImportError: # pragma: nocover - - class TrioBackend: # type: ignore - def __init__(self, *args, **kwargs): # type: ignore - msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed." 
- raise RuntimeError(msg) - - -__all__ = [ - # top-level requests - "request", - "stream", - # models - "Origin", - "URL", - "Request", - "Response", - # async - "AsyncHTTPConnection", - "AsyncConnectionPool", - "AsyncHTTPProxy", - "AsyncHTTP11Connection", - "AsyncHTTP2Connection", - "AsyncConnectionInterface", - "AsyncSOCKSProxy", - # sync - "HTTPConnection", - "ConnectionPool", - "HTTPProxy", - "HTTP11Connection", - "HTTP2Connection", - "ConnectionInterface", - "SOCKSProxy", - # network backends, implementations - "SyncBackend", - "AnyIOBackend", - "TrioBackend", - # network backends, mock implementations - "AsyncMockBackend", - "AsyncMockStream", - "MockBackend", - "MockStream", - # network backends, interface - "AsyncNetworkStream", - "AsyncNetworkBackend", - "NetworkStream", - "NetworkBackend", - # util - "default_ssl_context", - "SOCKET_OPTION", - # exceptions - "ConnectionNotAvailable", - "ProxyError", - "ProtocolError", - "LocalProtocolError", - "RemoteProtocolError", - "UnsupportedProtocol", - "TimeoutException", - "PoolTimeout", - "ConnectTimeout", - "ReadTimeout", - "WriteTimeout", - "NetworkError", - "ConnectError", - "ReadError", - "WriteError", -] - -__version__ = "0.17.3" - - -__locals = locals() -for __name in __all__: - if not __name.startswith("__"): - setattr(__locals[__name], "__module__", "httpcore") # noqa diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py b/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py deleted file mode 100644 index f7f1467fc53a7e27a7ffdb171a40791aa5b97134..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/schedulers/scheduling_heun_discrete.py +++ /dev/null @@ -1,299 +0,0 @@ -# Copyright 2023 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput - - -# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar -def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999) -> torch.Tensor: - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. 
- - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - - def alpha_bar(time_step): - return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return torch.tensor(betas, dtype=torch.float32) - - -class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin): - """ - Implements Algorithm 2 (Heun steps) from Karras et al. (2022). for discrete beta schedules. Based on the original - k-diffusion implementation by Katherine Crowson: - https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L90 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the - starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear` or `scaled_linear`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, - `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. - prediction_type (`str`, default `epsilon`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - """ - - _compatibles = [e.name for e in KarrasDiffusionSchedulers] - order = 2 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.00085, # sensible defaults - beta_end: float = 0.012, - beta_schedule: str = "linear", - trained_betas: Optional[Union[np.ndarray, List[float]]] = None, - prediction_type: str = "epsilon", - ): - if trained_betas is not None: - self.betas = torch.tensor(trained_betas, dtype=torch.float32) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. 
- self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - - # set all values - self.set_timesteps(num_train_timesteps, None, num_train_timesteps) - - def index_for_timestep(self, timestep): - indices = (self.timesteps == timestep).nonzero() - if self.state_in_first_order: - pos = -1 - else: - pos = 0 - return indices[pos].item() - - def scale_model_input( - self, - sample: torch.FloatTensor, - timestep: Union[float, torch.FloatTensor], - ) -> torch.FloatTensor: - """ - Args: - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - sample (`torch.FloatTensor`): input sample timestep (`int`, optional): current timestep - Returns: - `torch.FloatTensor`: scaled input sample - """ - step_index = self.index_for_timestep(timestep) - - sigma = self.sigmas[step_index] - sample = sample / ((sigma**2 + 1) ** 0.5) - return sample - - def set_timesteps( - self, - num_inference_steps: int, - device: Union[str, torch.device] = None, - num_train_timesteps: Optional[int] = None, - ): - """ - Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - device (`str` or `torch.device`, optional): - the device to which the timesteps should be moved to. If `None`, the timesteps are not moved. - """ - self.num_inference_steps = num_inference_steps - - num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps - - timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy() - - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) - sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) - sigmas = torch.from_numpy(sigmas).to(device=device) - self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) - - # standard deviation of the initial noise distribution - self.init_noise_sigma = self.sigmas.max() - - timesteps = torch.from_numpy(timesteps) - timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) - - if str(device).startswith("mps"): - # mps does not support float64 - self.timesteps = timesteps.to(device, dtype=torch.float32) - else: - self.timesteps = timesteps.to(device=device) - - # empty dt and derivative - self.prev_derivative = None - self.dt = None - - @property - def state_in_first_order(self): - return self.dt is None - - def step( - self, - model_output: Union[torch.FloatTensor, np.ndarray], - timestep: Union[float, torch.FloatTensor], - sample: Union[torch.FloatTensor, np.ndarray], - return_dict: bool = True, - ) -> Union[SchedulerOutput, Tuple]: - """ - Args: - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model. 
timestep - (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor` or `np.ndarray`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than SchedulerOutput class - Returns: - [`~schedulers.scheduling_utils.SchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.SchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - """ - step_index = self.index_for_timestep(timestep) - - if self.state_in_first_order: - sigma = self.sigmas[step_index] - sigma_next = self.sigmas[step_index + 1] - else: - # 2nd order / Heun's method - sigma = self.sigmas[step_index - 1] - sigma_next = self.sigmas[step_index] - - # currently only gamma=0 is supported. This usually works best anyways. - # We can support gamma in the future but then need to scale the timestep before - # passing it to the model which requires a change in API - gamma = 0 - sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now - - # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise - if self.config.prediction_type == "epsilon": - sigma_input = sigma_hat if self.state_in_first_order else sigma_next - pred_original_sample = sample - sigma_input * model_output - elif self.config.prediction_type == "v_prediction": - sigma_input = sigma_hat if self.state_in_first_order else sigma_next - pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( - sample / (sigma_input**2 + 1) - ) - elif self.config.prediction_type == "sample": - raise NotImplementedError("prediction_type not implemented yet: sample") - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" - ) - - if self.state_in_first_order: - # 2. Convert to an ODE derivative for 1st order - derivative = (sample - pred_original_sample) / sigma_hat - # 3. delta timestep - dt = sigma_next - sigma_hat - - # store for 2nd order step - self.prev_derivative = derivative - self.dt = dt - self.sample = sample - else: - # 2. 2nd order / Heun's method - derivative = (sample - pred_original_sample) / sigma_next - derivative = (self.prev_derivative + derivative) / 2 - - # 3. 
take prev timestep & sample - dt = self.dt - sample = self.sample - - # free dt and derivative - # Note, this puts the scheduler in "first order mode" - self.prev_derivative = None - self.dt = None - self.sample = None - - prev_sample = sample + derivative * dt - - if not return_dict: - return (prev_sample,) - - return SchedulerOutput(prev_sample=prev_sample) - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.FloatTensor, - ) -> torch.FloatTensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - self.sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) - if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): - # mps does not support float64 - self.timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) - timesteps = timesteps.to(original_samples.device, dtype=torch.float32) - else: - self.timesteps = self.timesteps.to(original_samples.device) - timesteps = timesteps.to(original_samples.device) - - step_indices = [self.index_for_timestep(t) for t in timesteps] - - sigma = self.sigmas[step_indices].flatten() - while len(sigma.shape) < len(original_samples.shape): - sigma = sigma.unsqueeze(-1) - - noisy_samples = original_samples + noise * sigma - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/deepwisdom/MetaGPT/metagpt/tools/__init__.py b/spaces/deepwisdom/MetaGPT/metagpt/tools/__init__.py deleted file mode 100644 index a148bb7447d4219e7f0e92ed002ddfddff8d6a20..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/metagpt/tools/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/4/29 15:35 -@Author : alexanderwu -@File : __init__.py -""" - - -from enum import Enum - - -class SearchEngineType(Enum): - SERPAPI_GOOGLE = "serpapi" - SERPER_GOOGLE = "serper" - DIRECT_GOOGLE = "google" - DUCK_DUCK_GO = "ddg" - CUSTOM_ENGINE = "custom" - - -class WebBrowserEngineType(Enum): - PLAYWRIGHT = "playwright" - SELENIUM = "selenium" - CUSTOM = "custom" - - @classmethod - def _missing_(cls, key): - """缺省类型转换""" - return cls.CUSTOM diff --git a/spaces/diacanFperku/AutoGPT/Carman Scan Lite Update Keygen.md b/spaces/diacanFperku/AutoGPT/Carman Scan Lite Update Keygen.md deleted file mode 100644 index 9a43283654367eaa9c7f8dfc197d1d6f8f3c8102..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Carman Scan Lite Update Keygen.md +++ /dev/null @@ -1,11 +0,0 @@ -

carman scan lite update keygen


Download Filehttps://gohhs.com/2uFVdF



-
-CarmanScan Update 1550 on all devices [Lite,WI,VG+VG64,VCI,AT] [06.2015] win | 413 MB. Carman Scan Automotive Diagnostic Scan Tools - Carman ... CarmanScan Plus v2.48 (update) is the latest version of the ... -Carman Scan VG + Vgate - a device for reading and erasing errors in Russian ... -CarmanScan VG+ is designed to read and erase errors from the car's ECU memory. -Runs on Windows XP and above. -To connect to the car, you only need to use the OBD2 connectors or ... -CarmanScan VG+ is a universal device for diagnosing, reading and erasing errors from the vehicle's system memory. 8a78ff9644
-
-
-

diff --git a/spaces/diacanFperku/AutoGPT/Essays In Love Alain De Botton Pdf __HOT__.md b/spaces/diacanFperku/AutoGPT/Essays In Love Alain De Botton Pdf __HOT__.md deleted file mode 100644 index 0449e6cad9d4717c55788acb42c8a335d3c9196e..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Essays In Love Alain De Botton Pdf __HOT__.md +++ /dev/null @@ -1,6 +0,0 @@ -

Essays In Love Alain De Botton Pdf


Download ○○○ https://gohhs.com/2uFUkr



-
-Acces PDF. Emotional Rescue. Essays On Love. Loss And Life. With A. Soundtrack. Alain de Botton: On Love |. Digital Season by Sydney Opera. House 10 ... 1fdad05405
-
-
-

diff --git a/spaces/diacanFperku/AutoGPT/Jmicron Firmware Update Tool.md b/spaces/diacanFperku/AutoGPT/Jmicron Firmware Update Tool.md deleted file mode 100644 index 96aa08bdff07bda8482093bcfcbd0411f9409f79..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Jmicron Firmware Update Tool.md +++ /dev/null @@ -1,6 +0,0 @@ -

jmicron firmware update tool


DOWNLOAD ••• https://gohhs.com/2uFTaA



-
-JMicron Firmware Update Utility for JMS539 v0.5 No Sleep jms539 firmware update.zip (1.02 MiB) Downloaded 832 times. after you install it you will find the tool ... 1fdad05405
-
-
-

diff --git a/spaces/diacanFperku/AutoGPT/PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l.md b/spaces/diacanFperku/AutoGPT/PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l.md deleted file mode 100644 index ed53e799a03b20b9d4ad869829ef39c03d3e3147..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l.md +++ /dev/null @@ -1,88 +0,0 @@ - -

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l: A Review

-

If you are looking for a powerful and reliable PDF editor, you might want to check out PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l. This is a pre-cracked version of the popular Adobe Acrobat Pro DC software, which means you don't need to worry about activation or license keys. You can just download and install it on your computer and enjoy all the features of the original software.

-

In this article, we will review PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l and see what it can do for you. We will also show you how to download and install it safely and easily.

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l


DOWNLOAD ☆☆☆☆☆ https://gohhs.com/2uFUgA



-

What is PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l?

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a modified version of the original Adobe Acrobat Pro DC software, which is one of the best PDF editors in the market. It allows you to create, edit, convert, sign, and share PDF files with ease and efficiency.

-

Some of the features of PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l are:

-
    -
  • Create PDF files from any application that prints, such as Word, Excel, PowerPoint, etc.
  • -
  • Edit PDF files with advanced tools, such as adding or deleting text, images, links, headers, footers, etc.
  • -
  • Convert PDF files to other formats, such as Word, Excel, PowerPoint, HTML, etc.
  • -
  • Sign PDF files with digital signatures or fillable forms.
  • -
  • Share PDF files with others via email, cloud services, or social media.
  • -
  • Collaborate with others on PDF files using comments, annotations, or markups.
  • -
  • Protect PDF files with passwords, encryption, or redaction.
  • -
  • Optimize PDF files for web or mobile viewing.
  • -
-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is pre-cracked by CrackzSoft team, which means you don't need to activate it with a license key or a serial number. You can just install it and use it without any limitations or restrictions.

-

How to Download and Install PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l?

-

If you want to download and install PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l on your computer, you need to follow these steps:

-

-
    -
  1. Go to the official website of CrackzSoft and find the download link for PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l.
  2. -
  3. Click on the download link and wait for the file to be downloaded on your computer.
  4. -
  5. Extract the file using WinRAR or any other extraction tool.
  6. -
  7. Run the setup file and follow the instructions on the screen.
  8. -
  9. Wait for the installation to be completed.
  10. -
  11. Enjoy using PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l on your computer.
  12. -
-

Note: You don't need to disable your antivirus or firewall before installing PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l as it is safe and virus-free.

-

Conclusion

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a great PDF editor that can help you create, edit, convert, sign, and share PDF files with ease and efficiency. It is pre-cracked by CrackzSoft team so you don't need to activate it with a license key or a serial number. You can just download and install it on your computer and use it without any limitations or restrictions.

-

If you are looking for a powerful and reliable PDF editor, you should definitely try PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l today!

-

What are the Benefits of PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l?

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l has many benefits that make it a superior PDF editor compared to other alternatives. Some of the benefits are:

-
    -
  • It is pre-cracked, which means you don't need to pay for a license or a subscription to use it.
  • -
  • It is updated to the latest version of Adobe Acrobat Pro DC, which means you can enjoy all the new features and improvements.
  • -
  • It is compatible with Windows 10, 8, 7, and Vista, which means you can use it on any computer.
  • -
  • It is easy to use, which means you can create, edit, convert, sign, and share PDF files with just a few clicks.
  • -
  • It is fast and efficient, which means you can work with large and complex PDF files without any lag or crash.
  • -
-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a great PDF editor that can help you save time and money while working with PDF files.

-

What are the Drawbacks of PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l?

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is not a perfect PDF editor and it has some drawbacks that you should be aware of before using it. Some of the drawbacks are:

-
    -
  • It is not an official version of Adobe Acrobat Pro DC, which means you may encounter some bugs or errors that are not fixed by Adobe.
  • -
  • It is not supported by Adobe, which means you cannot get any technical support or customer service from them.
  • -
  • It may not be compatible with some third-party plugins or extensions that are designed for the original Adobe Acrobat Pro DC software.
  • -
  • It may not be legal in some countries or regions that have strict copyright laws or regulations.
  • -
  • It may not be safe or secure if you download it from an untrusted source or website.
  • -
-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a risky PDF editor that may cause some problems or issues while working with PDF files.

-

How to Use PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l?

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is easy to use and has a user-friendly interface that lets you work with PDF files in a few simple steps. Here are some tips on how to use PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l:

-
    -
  • To create a PDF file, you can either drag and drop a file into the software or click on the Create button and choose a file from your computer.
  • -
  • To edit a PDF file, you can either double-click on it or right-click on it and choose Open With PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l.
  • -
  • To convert a PDF file, you can either click on the Export button and choose a format from the list or right-click on it and choose Export To.
  • -
  • To sign a PDF file, you can either click on the Fill & Sign button and choose a signature option or right-click on it and choose Fill & Sign.
  • -
  • To share a PDF file, you can either click on the Share button and choose a sharing option or right-click on it and choose Share.
  • -
-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a versatile PDF editor that can help you work with PDF files in various ways.

-

Where to Download PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l?

-

If you want to download PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l, you need to be careful about the source or website that you choose. There are many fake or malicious websites that may offer you PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l but actually infect your computer with viruses or malware.

-

The best and safest way to download PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is to go to the official website of CrackzSoft and find the download link for PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l there. You can also check the comments or reviews of other users who have downloaded PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l from CrackzSoft website and see if they have any issues or complaints.

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a high-quality PDF editor that you can download from CrackzSoft website without any worries or risks.

-

How to Uninstall PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l?

-

If you want to uninstall PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l from your computer, you need to follow these steps:

-
    -
  1. Go to the Control Panel and click on Programs and Features.
  2. -
  3. Find PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l from the list of installed programs and click on Uninstall.
  4. -
  5. Follow the instructions on the screen to complete the uninstallation process.
  6. -
  7. Restart your computer to remove any leftover files or registry entries.
  8. -
-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is easy to uninstall and does not leave any traces on your computer.

-

Is PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l Worth It?

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a PDF editor that has many advantages and disadvantages. It depends on your needs and preferences whether PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is worth it or not.

-

If you are looking for a PDF editor that is pre-cracked, updated, compatible, easy to use, fast and efficient, then PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l may be worth it for you.

-

If you are looking for a PDF editor that is official, supported, compatible with plugins or extensions, legal, safe and secure, then PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l may not be worth it for you.

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a PDF editor that has its pros and cons. You should weigh them carefully before deciding to use it or not.

-

Conclusion

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a PDF editor that can help you create, edit, convert, sign, and share PDF files with ease and efficiency. It is pre-cracked by CrackzSoft team so you don't need to activate it with a license key or a serial number. You can just download and install it on your computer and use it without any limitations or restrictions.

-

However, PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is not an official version of Adobe Acrobat Pro DC and it has some drawbacks that you should be aware of before using it. It is not supported by Adobe, it may not be compatible with some plugins or extensions, it may not be legal in some countries or regions, and it may not be safe or secure if you download it from an untrusted source or website.

-

PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l is a PDF editor that has its pros and cons. You should weigh them carefully before deciding to use it or not. If you are looking for a powerful and reliable PDF editor, you should definitely try PATCHED Adobe Acrobat Pro DC 2018.009.20044 Pre-Cracked - [CrackzSoft]l today!

3cee63e6c2
-
-
\ No newline at end of file diff --git a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/README_zh.md b/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/README_zh.md deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Bufeiyan-c-Bert-VITS2/README_zh.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/spaces/dineshreddy/WALT/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py deleted file mode 100644 index 0e86d2ea67e154fae18dbf9d2bfde6d0a70e582c..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py +++ /dev/null @@ -1,205 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule - -from mmdet.models.builder import HEADS -from .bbox_head import BBoxHead - - -@HEADS.register_module() -class ConvFCBBoxHead(BBoxHead): - r"""More general bbox head, with shared conv and fc layers and two optional - separated branches. - - .. code-block:: none - - /-> cls convs -> cls fcs -> cls - shared convs -> shared fcs - \-> reg convs -> reg fcs -> reg - """ # noqa: W605 - - def __init__(self, - num_shared_convs=0, - num_shared_fcs=0, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - conv_out_channels=256, - fc_out_channels=1024, - conv_cfg=None, - norm_cfg=None, - *args, - **kwargs): - super(ConvFCBBoxHead, self).__init__(*args, **kwargs) - assert (num_shared_convs + num_shared_fcs + num_cls_convs + - num_cls_fcs + num_reg_convs + num_reg_fcs > 0) - if num_cls_convs > 0 or num_reg_convs > 0: - assert num_shared_fcs == 0 - if not self.with_cls: - assert num_cls_convs == 0 and num_cls_fcs == 0 - if not self.with_reg: - assert num_reg_convs == 0 and num_reg_fcs == 0 - self.num_shared_convs = num_shared_convs - self.num_shared_fcs = num_shared_fcs - self.num_cls_convs = num_cls_convs - self.num_cls_fcs = num_cls_fcs - self.num_reg_convs = num_reg_convs - self.num_reg_fcs = num_reg_fcs - self.conv_out_channels = conv_out_channels - self.fc_out_channels = fc_out_channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - # add shared convs and fcs - self.shared_convs, self.shared_fcs, last_layer_dim = \ - self._add_conv_fc_branch( - self.num_shared_convs, self.num_shared_fcs, self.in_channels, - True) - self.shared_out_channels = last_layer_dim - - # add cls specific branch - self.cls_convs, self.cls_fcs, self.cls_last_dim = \ - self._add_conv_fc_branch( - self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) - - # add reg specific branch - self.reg_convs, self.reg_fcs, self.reg_last_dim = \ - self._add_conv_fc_branch( - self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) - - if self.num_shared_fcs == 0 and not self.with_avg_pool: - if self.num_cls_fcs == 0: - self.cls_last_dim *= self.roi_feat_area - if self.num_reg_fcs == 0: - self.reg_last_dim *= self.roi_feat_area - - self.relu = nn.ReLU(inplace=True) - # reconstruct fc_cls and fc_reg since input channels are changed - if self.with_cls: - self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1) - if self.with_reg: - out_dim_reg = (4 if self.reg_class_agnostic else 4 * - self.num_classes) - self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) - - def _add_conv_fc_branch(self, - num_branch_convs, - num_branch_fcs, - in_channels, - is_shared=False): - """Add shared or separable branch. 
- - convs -> avg pool (optional) -> fcs - """ - last_layer_dim = in_channels - # add branch specific conv layers - branch_convs = nn.ModuleList() - if num_branch_convs > 0: - for i in range(num_branch_convs): - conv_in_channels = ( - last_layer_dim if i == 0 else self.conv_out_channels) - branch_convs.append( - ConvModule( - conv_in_channels, - self.conv_out_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - last_layer_dim = self.conv_out_channels - # add branch specific fc layers - branch_fcs = nn.ModuleList() - if num_branch_fcs > 0: - # for shared branch, only consider self.with_avg_pool - # for separated branches, also consider self.num_shared_fcs - if (is_shared - or self.num_shared_fcs == 0) and not self.with_avg_pool: - last_layer_dim *= self.roi_feat_area - for i in range(num_branch_fcs): - fc_in_channels = ( - last_layer_dim if i == 0 else self.fc_out_channels) - branch_fcs.append( - nn.Linear(fc_in_channels, self.fc_out_channels)) - last_layer_dim = self.fc_out_channels - return branch_convs, branch_fcs, last_layer_dim - - def init_weights(self): - super(ConvFCBBoxHead, self).init_weights() - # conv layers are already initialized by ConvModule - for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: - for m in module_list.modules(): - if isinstance(m, nn.Linear): - nn.init.xavier_uniform_(m.weight) - nn.init.constant_(m.bias, 0) - - def forward(self, x): - # shared part - if self.num_shared_convs > 0: - for conv in self.shared_convs: - x = conv(x) - - if self.num_shared_fcs > 0: - if self.with_avg_pool: - x = self.avg_pool(x) - - x = x.flatten(1) - - for fc in self.shared_fcs: - x = self.relu(fc(x)) - # separate branches - x_cls = x - x_reg = x - - for conv in self.cls_convs: - x_cls = conv(x_cls) - if x_cls.dim() > 2: - if self.with_avg_pool: - x_cls = self.avg_pool(x_cls) - x_cls = x_cls.flatten(1) - for fc in self.cls_fcs: - x_cls = self.relu(fc(x_cls)) - - for conv in self.reg_convs: - x_reg = conv(x_reg) - if x_reg.dim() > 2: - if self.with_avg_pool: - x_reg = self.avg_pool(x_reg) - x_reg = x_reg.flatten(1) - for fc in self.reg_fcs: - x_reg = self.relu(fc(x_reg)) - - cls_score = self.fc_cls(x_cls) if self.with_cls else None - bbox_pred = self.fc_reg(x_reg) if self.with_reg else None - return cls_score, bbox_pred - - -@HEADS.register_module() -class Shared2FCBBoxHead(ConvFCBBoxHead): - - def __init__(self, fc_out_channels=1024, *args, **kwargs): - super(Shared2FCBBoxHead, self).__init__( - num_shared_convs=0, - num_shared_fcs=2, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - fc_out_channels=fc_out_channels, - *args, - **kwargs) - - -@HEADS.register_module() -class Shared4Conv1FCBBoxHead(ConvFCBBoxHead): - - def __init__(self, fc_out_channels=1024, *args, **kwargs): - super(Shared4Conv1FCBBoxHead, self).__init__( - num_shared_convs=4, - num_shared_fcs=1, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - fc_out_channels=fc_out_channels, - *args, - **kwargs) diff --git a/spaces/dirge/voicevox/voicevox_engine/setting/Setting.py b/spaces/dirge/voicevox/voicevox_engine/setting/Setting.py deleted file mode 100644 index f8912c6bff9afa959f445d8aa9c89c440b36b8db..0000000000000000000000000000000000000000 --- a/spaces/dirge/voicevox/voicevox_engine/setting/Setting.py +++ /dev/null @@ -1,25 +0,0 @@ -from enum import Enum -from typing import Optional - -from pydantic import BaseModel, Field - - -class CorsPolicyMode(str, Enum): - """ - CORSの許可モード - """ - - all = "all" # 全てのオリジンからのリクエストを許可 - 
localapps = "localapps" # ローカルアプリケーションからのリクエストを許可 - - -class Setting(BaseModel): - """ - エンジンの設定情報 - """ - - cors_policy_mode: CorsPolicyMode = Field(title="リソース共有ポリシー") - allow_origin: Optional[str] = Field(title="許可するオリジン") - - class Config: - use_enum_values = True diff --git a/spaces/ds520/bingo/src/components/chat-header.tsx b/spaces/ds520/bingo/src/components/chat-header.tsx deleted file mode 100644 index c6664b8dee61179f844d45c5bd650518fc2cb4c2..0000000000000000000000000000000000000000 --- a/spaces/ds520/bingo/src/components/chat-header.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import LogoIcon from '@/assets/images/logo.svg' -import Image from 'next/image' - -export function ChatHeader() { - return ( -
- logo -
欢迎使用新必应
-
由 AI 支持的网页版 Copilot
-
- ) -} diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/losses.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/utils.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/utils.py deleted file mode 100644 index 9794e0fc3463a5e8fad05c037cce64683059a6d3..0000000000000000000000000000000000000000 --- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.ERROR) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = 
plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() \ No newline at end of file diff --git a/spaces/epexVfeibi/Imagedeblurr/3dsmax 2014 Xforce Keygen.md b/spaces/epexVfeibi/Imagedeblurr/3dsmax 2014 Xforce Keygen.md deleted file mode 100644 index 312684bdc81130a057ab058ea42e70d53e925927..0000000000000000000000000000000000000000 --- a/spaces/epexVfeibi/Imagedeblurr/3dsmax 2014 Xforce Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -

3dsmax 2014 xforce keygen


Download Filehttps://jinyurl.com/2uEpx3



- -start XFORCE Keygen 32bits version or 64bits version. 8.Click on Mem Patch (you should see successfully patched). 9.Copy the request code ... 4d29de3e1b
-
-
-

diff --git a/spaces/eson/tokenizer-arena/vocab/chatglm_6b/__init__.py b/spaces/eson/tokenizer-arena/vocab/chatglm_6b/__init__.py deleted file mode 100644 index 4d6739668ffc7bc16aefee191675b9a78785ef86..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/chatglm_6b/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -依赖 icetk -""" - -import os -from transformers import AutoTokenizer - -os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" - -CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) -TOKENIZER_DIR = os.path.join(CURRENT_DIR, "tokenizer") - -tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_DIR, trust_remote_code=True) - - -# vocab_size = len(tokenizer.get_vocab()) -# vocab_size = tokenizer.vocab_size diff --git a/spaces/eson/tokenizer-arena/vocab/gpt_nexo_20b/test_hf_gpt_neox.py b/spaces/eson/tokenizer-arena/vocab/gpt_nexo_20b/test_hf_gpt_neox.py deleted file mode 100644 index 7ec7e848ebf6df47b222bd06fe9117ebb9a308ff..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/gpt_nexo_20b/test_hf_gpt_neox.py +++ /dev/null @@ -1,20 +0,0 @@ -""" - -https://github.com/EleutherAI/gpt-neox/blob/main/tools/corpora.py - -## - -""" - - -from transformers import AutoTokenizer, AutoModelForCausalLM - -# tokenizer = AutoTokenizer.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B") -tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") - -tokens = tokenizer.encode("good night\n中国 ss一个人去哪里") - -print(tokens) -print(tokenizer.decode(tokens)) -for token in tokens: - print(token, tokenizer.decode([token])) \ No newline at end of file diff --git a/spaces/exbert-project/exbert/client/src/exBERT.html b/spaces/exbert-project/exbert/client/src/exBERT.html deleted file mode 100644 index d6b46e939d061a7b848f7f0d302aef014db367a8..0000000000000000000000000000000000000000 --- a/spaces/exbert-project/exbert/client/src/exBERT.html +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - exBERT - - - - - - - - -
- - - - - - - \ No newline at end of file diff --git a/spaces/facebook/MaskCut/app.py b/spaces/facebook/MaskCut/app.py deleted file mode 100644 index f7a57453bd15567e4f91e5584c3d73aaeb19dc62..0000000000000000000000000000000000000000 --- a/spaces/facebook/MaskCut/app.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python - -import os -import pathlib - -import gradio as gr -import numpy as np -import PIL.Image as Image - -from model import Model, random_color, vis_mask - -model = Model() - - -def run(image_path, threshold, max_num_mask): - image = np.asarray(Image.open(image_path).convert('RGB')) - masks = model(image_path, threshold, max_num_mask) - for mask in masks: - image = vis_mask(image, mask, random_color(rgb=True)) - return image - - -DESCRIPTION = '# [MaskCut](https://github.com/facebookresearch/CutLER)' - -paths = sorted(pathlib.Path('CutLER/maskcut/imgs').glob('*.jpg')) - -with gr.Blocks(css='style.css') as demo: - gr.Markdown(DESCRIPTION) - with gr.Row(): - with gr.Column(): - image = gr.Image(label='Input image', type='filepath') - threshold = gr.Slider( - label='Threshold used for producing binary graph', - minimum=0, - maximum=1, - step=0.01, - value=0.15) - max_masks = gr.Slider( - label='The maximum number of pseudo-masks per image', - minimum=1, - maximum=20, - step=1, - value=6) - run_button = gr.Button('Run') - with gr.Column(): - result = gr.Image(label='Result') - - inputs = [image, threshold, max_masks] - gr.Examples(examples=[[path.as_posix(), 0.15, 6] for path in paths], - inputs=inputs, - outputs=result, - fn=run, - cache_examples=os.getenv('CACHE_EXAMPLES') == '1') - - run_button.click(fn=run, inputs=inputs, outputs=result, api_name='run') -demo.queue(max_size=20).launch() diff --git a/spaces/fatiXbelha/sd/Download Genshin Impact for Free and Start Your Journey Across Seven Nations.md b/spaces/fatiXbelha/sd/Download Genshin Impact for Free and Start Your Journey Across Seven Nations.md deleted file mode 100644 index d83b7c2ae757cbd1572c794fa7a205a1c941009c..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Genshin Impact for Free and Start Your Journey Across Seven Nations.md +++ /dev/null @@ -1,146 +0,0 @@ - -

Download Genshin Impact for Free: A Guide for Beginners

-

If you are looking for a new and exciting game to play, you might want to check out Genshin Impact, a free-to-play open-world action RPG that has taken the gaming world by storm. In this game, you can explore a vast and beautiful fantasy world called Teyvat, where you can meet a diverse cast of characters, fight powerful enemies, solve puzzles, collect loot, and uncover mysteries. You can also team up with your friends across different platforms and enjoy co-op events and challenges.

-

But how do you download Genshin Impact for free? And what do you need to know before you start playing? In this guide, we will answer these questions and more, so you can enjoy this amazing game without any hassle.

-

download genshin impact for free


DOWNLOADhttps://urllie.com/2uNyfc



-

How to Download Genshin Impact for Free on Different Platforms

-

One of the best things about Genshin Impact is that it is available on multiple platforms, including PC, mobile devices (Android and iOS), PlayStation 4, PlayStation 5, and Nintendo Switch (coming soon), and it can be downloaded for free on all of them. Here's how:

-
    -
  • PC: To download Genshin Impact on PC, you need to visit the official website of the game and click on the "Windows" button. This will download the launcher file, which you need to run and install. Then, open the launcher and click on "Get Game" to download the game files. Once the download is complete, you can click on "Launch" to start playing.
  • -
  • Mobile: To download Genshin Impact on mobile devices, you need to visit the Google Play Store or the App Store depending on your device, and search for "Genshin Impact". Then, tap on "Install" or "Get" to download the game. Once the download is complete, you can tap on the game icon to start playing.
  • -
  • PlayStation 4/PlayStation 5: To download Genshin Impact on PlayStation 4 or PlayStation 5, you need to visit the PlayStation Store on your console, and search for "Genshin Impact". Then, select the game and click on "Download" or "Add to Library" to download the game. Once the download is complete, you can select the game from your library or home screen to start playing.
  • -
• Nintendo Switch: To download Genshin Impact on Nintendo Switch, you will have to wait a little longer, as the game is still in development for this platform. However, according to the developer, miHoYo, the game will be released on Switch soon. We will update this guide when more information is available.
  • -
-

How to Create an Account and Start Playing

-


After downloading Genshin Impact on your preferred platform, you need to create an account to start playing. You can do this by following these steps:

-
    -
  • PC: When you launch the game, you will see a login screen where you can choose to register with your email or username, or log in with your existing miHoYo account, Facebook, Twitter, or Apple ID. If you don't have a miHoYo account, you can create one by clicking on "Register Now" and following the instructions. You will need to verify your email and set a password. Then, you can log in with your email or username and password.
  • -
  • Mobile: When you launch the game, you will see a login screen where you can choose to register with your email or phone number, or log in with your existing miHoYo account, Facebook, Twitter, Apple ID, or Google Play Games. If you don't have a miHoYo account, you can create one by clicking on "Register Now" and following the instructions. You will need to verify your email or phone number and set a password. Then, you can log in with your email or phone number and password.
  • -
  • PlayStation 4/PlayStation 5: When you launch the game, you will see a login screen where you can choose to log in with your existing PlayStation Network account or create a new one. If you don't have a PlayStation Network account, you can create one by clicking on "Create New Account" and following the instructions. You will need to enter your email, date of birth, country/region, and password. Then, you can log in with your email and password.
  • -
  • Nintendo Switch: When the game is released on Nintendo Switch, we will update this guide with the steps to create an account and start playing.
  • -
-

After logging in, you will be able to choose your server region and your preferred language. You can also customize your character's appearance and name. Then, you will be ready to start your adventure in Teyvat.

-

Genshin Impact Features and Gameplay

-

Genshin Impact is a game that offers a lot of features and gameplay elements that will keep you entertained for hours. Here are some of the main aspects of the game that you should know:

-

The Massive Open World of Teyvat and Its Seven Nations

-

Genshin Impact is set in a vast and beautiful world called Teyvat, which is divided into seven nations: Mondstadt, Liyue, Inazuma, Sumeru, Fontaine, Natlan, and Snezhnaya. Each nation has its own culture, history, landscape, and elemental affinity. You can explore each nation freely and discover its secrets, landmarks, quests, NPCs, enemies, resources, and treasures.

-

You can also interact with the environment in various ways, such as climbing mountains, gliding through the air, swimming across rivers, lighting torches, breaking rocks, picking flowers, cooking food, fishing, and more. The world of Teyvat is full of surprises and wonders that will make you feel like a true adventurer.

-

The Elemental Combat System and the Diverse Characters

-

Genshin Impact is a game that features an innovative and dynamic combat system based on elemental interactions. You can control up to four characters at a time, each with their own unique skills, weapons, and elemental attributes. You can switch between them at any time during battle, depending on the situation. You can also combine their skills to create powerful elemental reactions, such as burning, freezing, electro-charged, superconduct, swirl, crystallize, melt, and vaporize. These reactions can deal extra damage, inflict status effects, or provide buffs or debuffs to your enemies or allies.
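To make the pair-based reaction idea concrete, here is a small, purely illustrative Python sketch of how two applied elements could be looked up to find the resulting reaction. This is not the game's actual code; the `REACTIONS` table and element pairings below are simplified assumptions based only on the reactions named above.

```python
from typing import Optional

# Toy lookup table for a few of the reactions named above (illustrative only,
# not the game's actual implementation or data).
REACTIONS = {
    frozenset(["Pyro", "Hydro"]): "Vaporize",
    frozenset(["Pyro", "Cryo"]): "Melt",
    frozenset(["Electro", "Hydro"]): "Electro-Charged",
    frozenset(["Electro", "Cryo"]): "Superconduct",
    frozenset(["Hydro", "Cryo"]): "Frozen",
}


def react(applied_element: str, attacking_element: str) -> Optional[str]:
    """Return the reaction triggered when an attack of `attacking_element`
    hits a target already affected by `applied_element`, or None."""
    return REACTIONS.get(frozenset([applied_element, attacking_element]))


print(react("Hydro", "Electro"))  # Electro-Charged
print(react("Pyro", "Hydro"))     # Vaporize
print(react("Geo", "Geo"))        # None in this simplified table
```

In the actual game, far more cases are resolved (for example, Anemo's Swirl and Geo's Crystallize react with several different elements), but the underlying idea of pairing the applied element with the incoming one is the same.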

-

How to download genshin impact for free on PC
-Download genshin impact for free on Epic Games Store
-Genshin impact free download for Android and iOS
-Best tips and tricks for genshin impact free download
-Genshin impact free download guide and walkthrough
-Download genshin impact for free and explore Teyvat
-Genshin impact free download: how to unlock all characters
-Genshin impact free download: how to master elemental combat
-Genshin impact free download: how to play with friends
-Genshin impact free download: how to get primogems and wishes
-Genshin impact free download vs paid version: what's the difference?
-Genshin impact free download: how to update the game
-Genshin impact free download: how to fix common errors and issues
-Genshin impact free download: how to customize your character
-Genshin impact free download: how to level up fast and easy
-Download genshin impact for free and enjoy the beautiful visuals and soundtrack
-Genshin impact free download: how to access new areas and events
-Genshin impact free download: how to craft weapons and artifacts
-Genshin impact free download: how to join a guild and chat with other players
-Genshin impact free download: how to complete achievements and quests
-Download genshin impact for free from the official website
-Download genshin impact for free from Google Play Store
-Download genshin impact for free from App Store
-Download genshin impact for free from Steam
-Download genshin impact for free from PlayStation Store
-Download genshin impact for free without VPN or emulator
-Download genshin impact for free without registration or verification
-Download genshin impact for free without virus or malware
-Download genshin impact for free without survey or password
-Download genshin impact for free without root or jailbreak
-Is it legal to download genshin impact for free?
-Is it safe to download genshin impact for free?
-Is it worth it to download genshin impact for free?
-Is it possible to download genshin impact for free on Xbox or Switch?
-Is it easy to download genshin impact for free on Mac or Linux?
-What are the system requirements to download genshin impact for free?
-What are the benefits of downloading genshin impact for free?
-What are the drawbacks of downloading genshin impact for free?
-What are the alternatives to downloading genshin impact for free?
-What are the best sources to download genshin impact for free?

-

The game also offers a diverse roster of characters that you can collect and upgrade. There are currently 37 characters available in the game, each with their own personality, backstory, voice, and appearance. You can obtain new characters by using Primogems, the premium currency of the game, to perform Wishes, which are gacha-style draws that give you random rewards. You can also earn Primogems by completing quests, achievements, events, and other activities. You can also use other resources, such as Mora (the common currency), Character EXP Materials, Talent Level-Up Materials, Weapon Ascension Materials, Artifacts (equipment that provide stats and bonuses), Constellations (upgrades that enhance character skills), and more to level up, ascend, refine, enhance, and customize your characters.

-

The Beautiful Visuals and the Soothing Soundtrack

-


Genshin Impact is a game that boasts stunning graphics and a soothing soundtrack that will immerse you in the world of Teyvat. The game uses a cel-shaded art style that gives it a vibrant and colorful look. The game also features realistic lighting, shadows, reflections, weather effects, and physics that make the environment come to life. The game also supports 4K resolution and 60 FPS on PC and PS5, as well as HDR on PS4 and PS5.

-

The game also has a beautiful and relaxing soundtrack that matches the mood and atmosphere of each nation and region. The game features original music composed by Yu-Peng Chen and the HOYO-MiX team, as well as collaborations with the London Philharmonic Orchestra and the Shanghai Symphony Orchestra. The game also features voice acting in multiple languages, including English, Japanese, Chinese, Korean, French, German, Spanish, Russian, and Portuguese. You can choose your preferred language in the game settings.

-

The Cross-Platform Multiplayer Mode and the Co-op Events

-

Genshin Impact is a game that supports cross-platform multiplayer mode, which means you can play with your friends across different devices and platforms. You can invite up to three other players to join your world and explore, fight, and collect together. You can also join other players' worlds and help them out. You can also chat with other players using text or voice messages.

-

The game also features co-op events and challenges that require teamwork and coordination. For example, you can participate in the Spiral Abyss, a tower-like dungeon that tests your combat skills and rewards you with rare items. You can also join the Elemental Crucible, a timed event that requires you to collect elemental energy and deposit it in a furnace. You can also take part in seasonal events that offer exclusive rewards and stories.

-

Genshin Impact System Requirements and Tips

-

Genshin Impact is a game that requires a decent amount of system resources to run smoothly. Here are the minimum and recommended system requirements for PC, mobile, and PS4:

| Platform | Minimum Requirements | Recommended Requirements |
| --- | --- | --- |
| PC | OS: Windows 7 SP1 64-bit or higher<br>CPU: Intel Core i5 or equivalent<br>RAM: 8 GB<br>GPU: NVIDIA GeForce GT 1030 or higher<br>DirectX: Version 11<br>Storage: 30 GB | OS: Windows 10 64-bit<br>CPU: Intel Core i7 or equivalent<br>RAM: 16 GB<br>GPU: NVIDIA GeForce GTX 1060 6 GB or higher<br>DirectX: Version 11<br>Storage: 30 GB |
| Mobile | OS: Android 7.0 or higher / iOS 9.0 or higher<br>CPU: Qualcomm Snapdragon 845 / Kirin 810 or higher<br>RAM: 3 GB or higher<br>Storage: 8 GB or higher | OS: Android 8.1 or higher / iOS 10.0 or higher<br>CPU: Qualcomm Snapdragon 865 / Kirin 990 or higher<br>RAM: 4 GB or higher<br>Storage: 8 GB or higher |
| PS4 | PS4 Slim with at least 30 GB of free space | PS4 Pro with at least 30 GB of free space |

If you want to optimize your game settings and performance, here are some tips:

-
    -
• PC: You can adjust the graphics quality, resolution, frame rate, render resolution, anti-aliasing, shadows, textures, visual effects, and more in the game settings. You can also enable or disable V-sync, FPS limit, motion blur, depth of field, bloom, crowd density, and more. You can also use the custom mode to fine-tune each option according to your preference. You can also check your ping, FPS, GPU usage, CPU usage, memory usage, and network status in the game settings.
  • -
• Mobile: You can adjust the graphics quality, frame rate, render resolution, anti-aliasing, shadows, textures, visual effects, and more in the game settings. You can also enable or disable auto-adjust graphics, battery saving mode, co-op team voice chat, HD assets download, custom controls layout, and more. You can also use the custom mode to fine-tune each option according to your preference. You can also check your ping, FPS, battery level, network status, storage space, and download progress in the game settings.
  • -
• PS4: You can adjust the graphics mode (performance or quality), frame rate (30 FPS or 60 FPS), render resolution (1080p or 4K), motion blur (on or off), and co-op team voice chat in the game settings. You can also check your ping, FPS, network status, and download progress in the game settings.
  • -
-

Another tip to improve your game performance is to close any unnecessary background apps or programs that might consume your system resources or bandwidth. You can also update your device drivers, operating system, and game version to ensure the best compatibility and stability.

-

How to Manage Your Resources and Progress Efficiently

-

Genshin Impact is a game that requires a lot of time and effort to progress and unlock its full potential. You will need to manage your resources wisely and plan your activities accordingly. Here are some tips to help you with that:

-
    -
  • Adventure Rank (AR): This is the main indicator of your overall progress in the game. It determines what content and features you can access, such as quests, domains, bosses, events, and more. You can increase your AR by completing quests, exploring the world, opening chests, activating waypoints, unlocking statues, defeating enemies, and more. You will also receive rewards such as Primogems, Mora, items, and more for each AR level up.
  • -
  • Stamina: This is the resource that allows you to perform actions such as sprinting, climbing, gliding, swimming, and using certain skills. It is represented by a yellow bar on the top left corner of the screen. It will deplete as you perform these actions and regenerate over time or by consuming certain items. You can increase your maximum stamina by offering Anemoculus or Geoculus (collectible items that are scattered around the world) to the Statues of The Seven (giant statues that represent the gods of each nation).
  • -
• Original Resin: This is the resource that allows you to claim rewards from certain activities such as domains, bosses, Ley Lines, and events. It is represented by a moon-shaped icon on the top right corner of the screen. It has a maximum capacity of 160 and regenerates at a rate of 1 per 8 minutes, so a full refill from empty takes roughly 21 hours (see the short calculation sketch after this list). You can also replenish it by using Fragile Resin (a rare item that restores 60 Original Resin) or Primogems (the premium currency, which can restore 60 Original Resin for 50 Primogems). You can also craft Condensed Resin (which consumes 40 Original Resin) to claim doubled rewards from a single domain or Ley Line run.
  • -
  • Daily Commissions: These are four random quests that you can complete every day to earn Primogems, Mora, items, and Adventure Rank EXP. They are refreshed at 4 AM server time every day and can be accessed from the Quest menu. They are usually simple tasks such as defeating enemies, collecting items, escorting NPCs, or solving puzzles. You can also claim an extra reward from the Adventurer's Guild after completing all four commissions.
  • -
  • Weekly Bosses: These are powerful enemies that you can challenge once a week to earn rare items such as character ascension materials, weapon ascension materials, artifacts, and more. They are Stormterror Dvalin (a giant dragon that resides in Mondstadt), Andrius (a wolf-like creature that roams in Wolvendom), Childe (a Fatui Harbinger that lurks in Liyue Harbor), and Azhdaha (a primordial beast that dwells in Nantianmen). They require 60 Original Resin to claim their rewards and are refreshed at 4 AM server time every Monday.
  • -
  • Spiral Abyss: This is a tower-like dungeon that consists of 12 floors with three chambers each. Each chamber has different enemies, objectives, and buffs or debuffs. You can earn Primogems, Mora, items, and Abyssal Stars by clearing each chamber. You can also earn Abyssal Spire (a currency that can be exchanged for rare items) by clearing certain floors with a certain number of stars. The first eight floors are permanent and have fixed difficulty levels. The last four floors are seasonal and have variable difficulty levels and rewards. They are refreshed at 4 AM server time every two weeks.
  • -
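As mentioned in the Original Resin entry above, the regeneration rate and cap make refill times easy to work out. The short Python sketch below only illustrates that arithmetic; the 1-per-8-minutes rate and the 160 cap are taken from the description above, not from any official API.

```python
# Illustrative arithmetic only; the rate and cap are quoted from the text above.
RESIN_CAP = 160
MINUTES_PER_RESIN = 8


def minutes_until_full(current_resin: int) -> int:
    """Minutes needed to regenerate from `current_resin` back up to the cap."""
    missing = max(RESIN_CAP - current_resin, 0)
    return missing * MINUTES_PER_RESIN


total = minutes_until_full(0)  # from empty: 160 * 8 = 1280 minutes
print(f"{total} minutes = {total // 60} h {total % 60} min")  # about 21 h 20 min
```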
-

Genshin Impact Reviews and Ratings

-

Genshin Impact is a game that has received a lot of praise and criticism from critics and players alike. Here are some of the positive and negative aspects of the game according to different sources:

-

The Positive Aspects

-
    -
  • The game has a stunning and immersive open world that offers a lot of exploration and discovery.
  • -
  • The game has an innovative and fun combat system that allows for diverse and creative strategies.
  • -
• The game has a charming and diverse cast of characters that have their own personalities, stories, and voice acting.
  • -
  • The game has a beautiful and relaxing soundtrack that enhances the mood and atmosphere of the game.
  • -
  • The game has a lot of content and features that offer a variety of gameplay options and challenges.
  • -
  • The game supports cross-platform multiplayer mode that allows for co-op and social interactions.
  • -
  • The game is free-to-play and does not require any upfront payment or subscription.
  • -
-

The Negative Aspects

-
    -
  • The game has a gacha system that relies on RNG and can be frustrating and expensive for some players.
  • -
  • The game has a stamina system that limits the amount of activities and rewards that can be done and claimed per day.
  • -
  • The game has some technical issues and bugs that can affect the performance and quality of the game.
  • -
  • The game has some repetitive and grindy aspects that can become boring and tedious for some players.
  • -
  • The game has some controversial and sensitive topics that can offend or trigger some players.
  • -
-

The Average Scores and Ratings

-

Here are some of the average scores and ratings that Genshin Impact has received from different sources:

| Source | Score/Rating |
| --- | --- |
| Metacritic (PC) | 84/100 (based on 22 critic reviews) |
| Metacritic (PS4) | 83/100 (based on 13 critic reviews) |
| Metacritic (User Score) | 7.1/10 (based on 2,254 user ratings) |
| Google Play Store | 4.5/5 (based on 2,032,674 user ratings) |
| App Store | 4.6/5 (based on 146,500 user ratings) |
| PlayStation Store | 4.5/5 (based on 32,869 user ratings) |

The Future Updates and Plans for the Game

-

Genshin Impact is a game that is constantly being updated and improved by the developers, miHoYo. They have announced their roadmap for the future updates and plans for the game, which include:

-
    -
  • New regions, characters, quests, events, and features to be added regularly.
  • -
  • New platforms, such as Nintendo Switch and Xbox, to be supported in the future.
  • -
• New languages, such as Thai, Vietnamese, Indonesian, Turkish, and many others, to be added in the future.
  • -
  • New modes, such as housing system, dating system, fishing system, gardening system, and more to be added in the future.
  • -
  • New improvements, such as bug fixes, optimization, balance changes, quality of life features, and more to be implemented regularly.
  • -
-

Conclusion and FAQs

-

Genshin Impact is a free-to-play open-world action RPG that offers a lot of features and gameplay elements that will appeal to a wide range of players. You can download it for free on different platforms and create an account to start playing. You can explore a vast and beautiful world, fight with elemental skills, collect and upgrade characters, enjoy co-op events, and more. You can also check the system requirements and tips to optimize your game performance and progress. You can also read the reviews and ratings to see what others think of the game. And you can look forward to the future updates and plans that will add more content and features to the game.

-

If you are interested in playing Genshin Impact, you can visit the official website or the official social media accounts to learn more about the game and stay updated with the latest news and announcements. You can also join the official Discord server or the official Reddit community to interact with other players and get help or advice. You can also watch the official YouTube channel or Twitch channel to see gameplay videos and live streams.

-

We hope this guide has helped you learn how to download Genshin Impact for free and enjoy this amazing game. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you. Happy gaming!

-

FAQs

-

Here are some of the frequently asked questions and answers about Genshin Impact:

-
    -
  1. Q: Is Genshin Impact pay-to-win?
  2. -
  3. A: No, Genshin Impact is not pay-to-win. The game is free-to-play and does not require any payment or subscription to play. The game also does not have any paywalls or content that is exclusive to paying players. The game does have a gacha system that allows players to spend real money to obtain new characters and items, but these are not necessary to enjoy or complete the game. The game also provides plenty of ways to earn the premium currency, Primogems, for free by playing the game.
  4. -
  5. Q: Is Genshin Impact online-only?
  6. -
  7. A: Yes, Genshin Impact is online-only. The game requires a stable internet connection to play, as it is constantly updated and synced with the server. The game does not have an offline mode or a single-player mode. However, the game does have a solo mode that allows players to play alone without other players joining their world.
  8. -
  9. Q: Is Genshin Impact cross-save?
  10. -
  11. A: Yes, Genshin Impact is cross-save. The game allows players to use the same account and progress across different platforms and devices, except for PlayStation 4 and PlayStation 5. This means that if you play on PC, mobile, or Nintendo Switch (when it is released), you can switch between them without losing your data. However, if you play on PlayStation 4 or PlayStation 5, you can only play on those platforms with your account and progress.
  12. -
  13. Q: Is Genshin Impact safe for kids?
  14. -
  15. A: Genshin Impact is rated T for Teen by ESRB, which means that it may contain violence, blood, suggestive themes, alcohol reference, and in-game purchases. The game also has some controversial and sensitive topics that may offend or trigger some players. Therefore, we recommend that parents supervise their kids when playing the game or use parental controls to restrict access or limit play time. We also recommend that players be respectful and responsible when interacting with other players online.
  16. -
  17. Q: How long is Genshin Impact?
  18. -
  19. A: Genshin Impact is a game that has no definitive end or completion. The game is constantly being updated and expanded with new content and features. The game also has a lot of replay value, as it offers different choices, outcomes, secrets, achievements, events, challenges, and more. Therefore, the length of the game depends on how much you want to explore, collect, upgrade, and enjoy. However, according to HowLongToBeat.com, the average time to finish the main story of the game is about 50 hours, while the average time to complete all the content of the game is about 200 hours. Of course, these numbers may vary depending on your play style and preferences.
  20. -

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/fclong/summary/fengshen/models/GAVAE/gans_model.py b/spaces/fclong/summary/fengshen/models/GAVAE/gans_model.py deleted file mode 100644 index 5880acf9c36c6dfd41cf6286f25a93501e64e5e5..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/models/GAVAE/gans_model.py +++ /dev/null @@ -1,484 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils.data import Dataset, DataLoader -import numpy as np - - -class MyDataset(Dataset): - def __init__(self, x, y): - self.x = x - self.y = y - self.len = self.x.size(0) - - def __getitem__(self, index): - return self.x[index], self.y[index] - - def __len__(self): - return self.len - - -class MyDataset_new(Dataset): - def __init__(self, x, y, s): - self.x = x - self.y = y - self.s = s - self.len = self.x.size(0) - - def __getitem__(self, index): - return self.x[index], self.y[index], self.s[index] - - def __len__(self): - return self.len - - -class CLS_Net(torch.nn.Module): - - def __init__(self, cls_num, z_dim, cls_batch_size): - super(CLS_Net, self).__init__() - - mini_dim = 256 #256 - - out_input_num = mini_dim - - base_dim = 64 #256 #64 - - self.cls_batch_size = cls_batch_size - self.jie = 1 - - self.fc1 = nn.Linear(z_dim, mini_dim) - self.fc1.weight.data.normal_(0, 0.1) - - self.fc2 = nn.Linear(out_input_num, base_dim) - self.fc2.weight.data.normal_(0, 0.1) - - self.out = nn.Linear(base_dim, cls_num) - self.out.weight.data.normal_(0, 0.1) - - def self_dis(self, a): - max_dim = self.cls_batch_size - jie = self.jie - - all_tag = False - for j in range(a.shape[0]): - col_tag = False - for i in range(a.shape[0]): - tmp = F.pairwise_distance(a[j,:], a[i,:] , p = jie).view(-1,1) - if col_tag == False: - col_dis = tmp - col_tag = True - else: - col_dis = torch.cat((col_dis, tmp), dim = 0) - if all_tag == False: - all_dis = col_dis - all_tag = True - else: - all_dis = torch.cat((all_dis, col_dis), dim = 1) - ''' - print(all_dis.shape) - if all_dis.shape[1] < max_dim: - all_dis = torch.cat((all_dis, all_dis[:,:(max_dim - all_dis.shape[1])]), dim = 1) - print(all_dis.shape) - ''' - return all_dis - - def forward(self, x): - - x = self.fc1(x) - x1 = F.relu(x) - - x2 = self.fc2(x1) - x2 = torch.nn.Dropout(0.1)(x2) #0.3 - x2 = F.relu(x2) - - y = self.out(x2) - - return y, x1 - - -class Gen_Net(torch.nn.Module): - - def __init__(self,input_x2_dim, output_dim): - super(Gen_Net, self).__init__() - - self.x2_input = nn.Linear(input_x2_dim , 60) - self.x2_input.weight.data.normal_(0, 0.1) - - self.fc1 = nn.Linear(60, 128) - self.fc1.weight.data.normal_(0, 0.1) - - self.fc2 = nn.Linear(128, 256) - self.fc2.weight.data.normal_(0, 0.1) - - self.fc3 = nn.Linear(256, 128) - self.fc3.weight.data.normal_(0, 0.1) - - self.out = nn.Linear(128, output_dim) - self.out.weight.data.normal_(0, 0.1) - - def forward(self,x2): - x2 = self.x2_input(x2) - - x = x2 - x = self.fc1(x) - x = F.relu(x) - - x = self.fc2(x) - x = F.relu(x) - - x = self.fc3(x) - x = F.relu(x) - y = self.out(x) - - return y - - -class gans_process(): - - def __init__(self, config): - - #base pare - self.device = config.device - self.cls_num = config.cls_num - self.x2_dim = config.noise_dim - self.z_dim = config.z_dim - - self.cls_lr = config.cls_lr - self.gen_lr = config.gen_lr - self.cls_epoches = config.cls_epoches - self.gen_epoches = config.gen_epoches - self.mse_weight = 1.0 - - self.cls_batch_size = config.cls_batch_size - self.gen_batch_size = config.gen_batch_size - self.eval_batch_size = 
config.cls_batch_size - self.gen_batch_size = self.cls_batch_size - - #optimer and net - self.cls_net = CLS_Net(self.cls_num, self.z_dim, self.cls_batch_size).to(self.device) - self.cls_optimizer = torch.optim.SGD(self.cls_net.parameters(), - lr = self.cls_lr , weight_decay= 1e-5) - # gen net - self.gen_net = Gen_Net(self.x2_dim, self.z_dim).to(self.device) - - self.gen_optimizer = torch.optim.SGD(self.gen_net.parameters(), - lr = self.gen_lr , weight_decay= 0.01) - - #base loss - self.loss_func = torch.nn.CrossEntropyLoss() - self.loss_mse = torch.nn.MSELoss() - - def freeze_cls(self): - for param in self.cls_net.parameters(): - param.requires_grad = False - - def unfreeze_cls(self): - for param in self.cls_net.parameters(): - param.requires_grad = True - - def freeze_gen(self): - for param in self.gen_net.parameters(): - param.requires_grad = False - - def unfreeze_gen(self): - for param in self.gen_net.parameters(): - param.requires_grad = True - - def labels2genx(self, sample_num): - x = torch.rand(sample_num, self.x2_dim) - return x.to(self.device) - - def pad_batch(self, x): - if int(x.shape[0] % self.cls_batch_size) == 0: - return x - pad_len = self.cls_batch_size - ( x.shape[0] % self.cls_batch_size) - x = torch.cat((x, x[:pad_len]), dim = 0) - return x - - def ready_cls(self, sent_output,perm=None): - sample_num = len(sent_output) - #---------------make fake z--------------- - sent_output = sent_output.to(self.device) - sent_noise = torch.tensor(self.gen_test(sample_num)).to(self.device) - - #--------------handle datas--------------- - x = torch.cat((sent_output, sent_noise), dim = 0 ) - if perm is None: - perm = torch.randperm(len(x)) - x = x[perm] - #add y - only one label per time - multi_label_num = 1 - multi_output_y = torch.tensor([0]*sample_num).unsqueeze(1) - multi_noise_y = torch.zeros([sent_noise.size(0),1], dtype = torch.int) - multi_noise_y = multi_noise_y + multi_label_num - - y = torch.cat((multi_output_y, multi_noise_y), dim = 0).to(self.device) - y = y[perm] - # x_train = x [:self.train_len] - # y_train = y [:self.train_len] - # x_test = x [self.train_len:] - # y_test = y [self.train_len:] - - return x,y,None,None,perm - - def ready_fake(self, sent_output, inputs_labels, inputs_indexs, label2id, perm = None): - - #---------------make fake z--------------- - sent_output = sent_output.to(self.device) - sent_noise = torch.tensor(self.gen_test(inputs_labels, inputs_indexs)).to(self.device) - - #--------------handle datas--------------- - x = sent_noise - y = torch.tensor(inputs_labels).unsqueeze(1) - if perm is None: - perm = torch.randperm(len(x)) - x = x[perm] - y = y[perm] - - return x,y,perm - - def ready_gen(self, sent_output): - #, inputs_labels, inputs_indexs - sent_num = len(sent_output) - sent_output = sent_output.to(self.device) - x2 = self.labels2genx(sent_num) - y = torch.tensor([0]*sent_num).unsqueeze(1).to(self.device) - - return x2, y, sent_output - - def cls_train(self, x, y, if_oneHot = True): - - #init - self.cls_net.train() - self.gen_net.eval() - - self.unfreeze_cls() - self.freeze_gen() - - x = x.to(self.device) - y = y.to(self.device) - - #if oneHot - if if_oneHot: - y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1) - #make dataset - mydataset = MyDataset(x, y) - train_loader = DataLoader(dataset=mydataset, - batch_size=self.cls_batch_size, shuffle=True) - - #training - for epoch in range(self.cls_epoches): - losses = [] - accuracy = [] - for step, (batch_x, batch_y) in enumerate(train_loader): - 
self.cls_optimizer.zero_grad() - - out, _ = self.cls_net(batch_x) - loss = self.loss_func(out, batch_y) - - #One-side label smoothing -not used - #location 0 real, location 1 fake - batch_y = batch_y * torch.tensor([0.9, 1.0]).to(self.device) - - loss.backward() - self.cls_optimizer.step() - #tqdm - _, predictions = out.max(1) - predictions = predictions.cpu().numpy().tolist() - _,real_y = batch_y.max(1) - real_y = real_y.cpu().numpy().tolist() - - num_correct = np.sum([int(x==y) for x,y in zip(predictions, real_y)]) - running_train_acc = float(num_correct) / float(batch_x.shape[0]) - losses.append(loss) - accuracy.append(running_train_acc) - - - return self.cls_net - - def cls_eval(self, x, y, if_oneHot = True): - - #init - self.cls_net.eval() - x = x.to(self.device) - y = y.to(self.device) - - #if oneHot - if if_oneHot: - y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1) - #make dataset - mydataset = MyDataset(x, y) - train_loader = DataLoader(dataset=mydataset, - batch_size=self.eval_batch_size, shuffle=False) - - losses = [] - accuracy = [] - #evaling - for step, (batch_x, batch_y) in enumerate(train_loader): - out,_ = self.cls_net(batch_x) - loss = self.loss_func(out, batch_y) - - #tqdm - _, predictions = out.max(1) - predictions = predictions.cpu().numpy().tolist() - _,real_y = batch_y.max(1) - real_y = real_y.cpu().numpy().tolist() - - num_correct = np.sum([int(x==y) for x,y in zip(predictions, real_y)]) - running_train_acc = float(num_correct) / float(batch_x.shape[0]) - accuracy.append(running_train_acc) - - - mean_acc = np.mean(accuracy) - return mean_acc - - def cls_real_eval(self, x, y, if_oneHot = True): - - #init - self.cls_net.eval() - x = x.to(self.device) - y = y.to(self.device) - - #if oneHot - if if_oneHot: - y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1) - #make dataset - mydataset = MyDataset(x, y) - train_loader = DataLoader(dataset=mydataset, - batch_size=self.eval_batch_size, shuffle=False) - - rs = 0 - alls = 0 - - #evaling - for step, (batch_x, batch_y) in enumerate(train_loader): - out, _ = self.cls_net(batch_x) - loss = self.loss_func(out, batch_y) - - #tqdm - _, predictions = out.max(1) - predictions = predictions.cpu().numpy().tolist() - _,real_y = batch_y.max(1) - real_y = real_y.cpu().numpy().tolist() - - right_num = np.sum([int( x==y and int(y) != int(self.cls_num-1) ) for x,y in zip(predictions, real_y)]) - all_num = np.sum([int(int(y) != int(self.cls_num-1) ) for x,y in zip(predictions, real_y)]) - - rs = rs + right_num - alls = alls + all_num - - - return rs/alls - - def cls_test(self, x, if_oneHot = True): - - #init - self.cls_net.eval() - x = x.to(self.device) - y = torch.zeros([x.size(0),1], dtype = torch.float).to(self.device) - - #if oneHot - if if_oneHot: - y = torch.zeros(y.size(0), self.cls_num).to(self.device).scatter_(1, y.long(), 1) - #make dataset - mydataset = MyDataset(x, y) - train_loader = DataLoader(dataset=mydataset, - batch_size=self.eval_batch_size, shuffle=False) - - preds = [] - #testing - for step, (batch_x, batch_y) in enumerate(train_loader): - out, _ = self.cls_net(batch_x) - loss = self.loss_func(out, batch_y) - - #tqdm - _, predictions = out.max(1) - predictions = predictions.cpu().numpy().tolist() - preds.extend(predictions) - - return preds - - def gen_train(self, x2, y, s, times): - - #init - self.cls_net.eval() - self.gen_net.train() - - self.freeze_cls() - self.unfreeze_gen() - - #y is gen + cls - y = torch.zeros(y.size(0), 
self.cls_num).to(self.device).scatter_(1, y.long(), 1) - - #make dataset - mydataset = MyDataset_new(x2, y, s) - train_loader = DataLoader(dataset=mydataset, - batch_size=self.gen_batch_size, shuffle=True) - - #training - for epoch in range(self.gen_epoches): - losses = [] - accuracy = [] - for step, (batch_x2, batch_y, batch_s) in enumerate(train_loader): - - # no zero_grad = make batch_size - if step % 6 == 5: #23 - self.gen_optimizer.zero_grad() - - out = self.gen_net(batch_x2) - - #fearture matching - out, hds = self.cls_net(out) - out2, hds2 = self.cls_net(batch_s.float()) - loss = self.loss_mse(hds, hds2) - loss = loss * pow(0.9, times) - loss.backward() - self.gen_optimizer.step() - - #tqdm - _, predictions = out.max(1) - predictions = predictions.cpu().numpy().tolist() - _, real_y = batch_y.max(1) - real_y = real_y.cpu().numpy().tolist() - - num_correct = np.sum([int(x==y) for x,y in zip(predictions, real_y)]) - running_train_acc = float(num_correct) / float(batch_x2.shape[0]) - losses.append(loss) - accuracy.append(running_train_acc) - - return self.gen_net - - def gen_test(self, sample_num): - - #init - self.gen_net.eval() - x2 = self.labels2genx(sample_num) - #x2: len(inputs_labels) * 80 - y = torch.zeros([sample_num,1], dtype = torch.float) - y = torch.zeros(sample_num, self.z_dim).scatter_(1, y.long(), 1) - y = y.to(self.device) - s = torch.ones((sample_num, self.z_dim)).to(self.device) - - #make dataset - mydataset = MyDataset_new(x2, y, s) - train_loader = DataLoader(dataset=mydataset, - batch_size=self.eval_batch_size, shuffle=False) - - preds = [] - #testing - for step, (batch_x2, batch_y, batch_s) in enumerate(train_loader): - - out = self.gen_net(batch_x2) - - loss = self.loss_mse(out.double(), batch_s.double()) - - predictions = out.cpu().detach().numpy().tolist() - preds.extend(predictions) - - return preds - - -if __name__ == '__main__': - - pass - diff --git a/spaces/fengmuxi/ChatGpt-Web/app/bing-chat/index.js b/spaces/fengmuxi/ChatGpt-Web/app/bing-chat/index.js deleted file mode 100644 index 1904031c5751b5701407916ab300919a9e61ac5a..0000000000000000000000000000000000000000 --- a/spaces/fengmuxi/ChatGpt-Web/app/bing-chat/index.js +++ /dev/null @@ -1,284 +0,0 @@ -// src/bing-chat.ts -import crypto from "node:crypto"; -import WebSocket from "ws"; - -// src/fetch.ts -var fetch = globalThis.fetch; -if (typeof fetch !== "function") { - throw new Error("Invalid environment: global fetch not defined"); -} - -// src/bing-chat.ts -var terminalChar = ""; -var BingChat = class { - constructor(opts) { - const { cookie, debug = false } = opts; - this._cookie = cookie; - this._debug = !!debug; - if (!this._cookie) { - throw new Error("Bing cookie is required"); - } - } - /** - * Sends a message to Bing Chat, waits for the response to resolve, and returns - * the response. - * - * If you want to receive a stream of partial responses, use `opts.onProgress`. 
- * - * @param message - The prompt message to send - * @param opts.conversationId - Optional ID of a conversation to continue (defaults to a random UUID) - * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated - * - * @returns The response from Bing Chat - */ - async sendMessage(text, opts = {}) { - const { - invocationId = "1", - onProgress, - locale = "en-US", - market = "en-US", - region = "US", - location, - messageType = "Chat", - variant = "Balanced", - } = opts; - let { conversationId, clientId, conversationSignature } = opts; - const isStartOfSession = !( - conversationId && - clientId && - conversationSignature - ); - if (isStartOfSession) { - const conversation = await this.createConversation(); - conversationId = conversation.conversationId; - clientId = conversation.clientId; - conversationSignature = conversation.conversationSignature; - } - const result = { - author: "bot", - id: crypto.randomUUID(), - conversationId, - clientId, - conversationSignature, - invocationId: `${parseInt(invocationId, 10) + 1}`, - text: "", - }; - const responseP = new Promise(async (resolve, reject) => { - const chatWebsocketUrl = "wss://sydney.bing.com/sydney/ChatHub"; - const ws = new WebSocket(chatWebsocketUrl, { - perMessageDeflate: false, - headers: { - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - pragma: "no-cache", - }, - }); - let isFulfilled = false; - function cleanup() { - ws.close(); - ws.removeAllListeners(); - } - ws.on("error", (error) => { - console.warn("WebSocket error:", error); - cleanup(); - if (!isFulfilled) { - isFulfilled = true; - reject(new Error(`WebSocket error: ${error.toString()}`)); - } - }); - ws.on("close", () => {}); - ws.on("open", () => { - ws.send(`{"protocol":"json","version":1}${terminalChar}`); - }); - let stage = 0; - ws.on("message", (data) => { - var _a, _b; - const objects = data.toString().split(terminalChar); - const messages = objects - .map((object) => { - try { - return JSON.parse(object); - } catch (error) { - return object; - } - }) - .filter(Boolean); - if (!messages.length) { - return; - } - if (stage === 0) { - ws.send(`{"type":6}${terminalChar}`); - const traceId = crypto.randomBytes(16).toString("hex"); - const locationStr = location - ? 
`lat:${location.lat};long:${location.lng};re=${ - location.re || "1000m" - };` - : void 0; - const optionsSets = [ - "nlu_direct_response_filter", - "deepleo", - "enable_debug_commands", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - "trffovrd", - "h3toppfp3", - "forcerep", - "cpcttl1d", - "dv3sugg", - ]; - if (variant == "Balanced") { - optionsSets.push("galileo"); - optionsSets.push("glprompt"); - } else if (variant == "Creative") { - optionsSets.push("h3imaginative"); - optionsSets.push("gencontentv3"); - } else if (variant == "Precise") { - optionsSets.push("h3precise"); - } - const params = { - arguments: [ - { - source: "cib", - optionsSets, - allowedMessageTypes: [ - "Chat", - "InternalSearchQuery", - "InternalSearchResult", - "InternalLoaderMessage", - "RenderCardRequest", - "AdsQuery", - "SemanticSerp", - ], - sliceIds: [], - traceId, - isStartOfSession, - message: { - locale, - market, - region, - location: locationStr, - author: "user", - inputMethod: "Keyboard", - messageType, - text, - }, - conversationSignature, - participant: { id: clientId }, - conversationId, - }, - ], - invocationId, - target: "chat", - type: 4, - }; - if (this._debug) { - console.log(chatWebsocketUrl, JSON.stringify(params, null, 2)); - } - ws.send(`${JSON.stringify(params)}${terminalChar}`); - ++stage; - return; - } - for (const message of messages) { - if (message.type === 1) { - const update = message; - const msg = - (_a = update.arguments[0].messages) == null ? void 0 : _a[0]; - if (!msg) continue; - if (!msg.messageType) { - result.author = msg.author; - result.text = msg.text; - result.detail = msg; - onProgress == null ? void 0 : onProgress(result); - } - } else if (message.type === 2) { - const response = message; - if (this._debug) { - console.log("RESPONSE", JSON.stringify(response, null, 2)); - } - const validMessages = - (_b = response.item.messages) == null - ? void 0 - : _b.filter((m) => !m.messageType); - const lastMessage = - validMessages == null - ? void 0 - : validMessages[ - (validMessages == null ? void 0 : validMessages.length) - 1 - ]; - if (lastMessage) { - result.conversationId = response.item.conversationId; - result.conversationExpiryTime = - response.item.conversationExpiryTime; - result.author = lastMessage.author; - result.text = lastMessage.text; - result.detail = lastMessage; - if (!isFulfilled) { - isFulfilled = true; - resolve(result); - } - } - } else if (message.type === 3) { - if (!isFulfilled) { - isFulfilled = true; - resolve(result); - } - cleanup(); - return; - } else { - } - } - }); - }); - return responseP; - } - async createConversation() { - const requestId = crypto.randomUUID(); - const cookie = this._cookie.includes(";") - ? 
this._cookie - : `_U=${this._cookie}`; - return fetch("https://www.bing.com/turing/conversation/create", { - headers: { - accept: "application/json", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "sec-ch-ua": - '"Not_A Brand";v="99", "Microsoft Edge";v="109", "Chromium";v="109"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"109.0.1518.78"', - "sec-ch-ua-full-version-list": - '"Not_A Brand";v="99.0.0.0", "Microsoft Edge";v="109.0.1518.78", "Chromium";v="109.0.5414.120"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": "", - "sec-ch-ua-platform": '"macOS"', - "sec-ch-ua-platform-version": '"12.6.0"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "x-edge-shopping-flag": "1", - "x-ms-client-request-id": requestId, - "x-ms-useragent": - "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/MacIntel", - "x-forwarded-for": "1.1.1.1", - cookie, - }, - referrer: "https://www.bing.com/search", - referrerPolicy: "origin-when-cross-origin", - body: null, - method: "GET", - mode: "cors", - credentials: "include", - }).then((res) => { - if (res.ok) { - return res.json(); - } else { - throw new Error( - `unexpected HTTP error createConversation ${res.status}: ${res.statusText}`, - ); - } - }); - } -}; -export { BingChat }; -//# sourceMappingURL=index.js.map diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Descubre el misterio del Tomb of the Mask APK sin anuncios y escala las paredes como nunca antes..md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Descubre el misterio del Tomb of the Mask APK sin anuncios y escala las paredes como nunca antes..md deleted file mode 100644 index 920f91297667e01309fd50dc11f83d85a73fd457..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Descubre el misterio del Tomb of the Mask APK sin anuncios y escala las paredes como nunca antes..md +++ /dev/null @@ -1,119 +0,0 @@ -
-

Tomb of the Mask Apk Sin Anuncios: How to Enjoy This Retro Arcade Game Without Ads

-

If you are a fan of retro arcade games, you might have heard of Tomb of the Mask, a popular mobile game developed by Playgendary. The game has received positive reviews from players and critics alike. The game's fast-paced gameplay, challenging levels, and retro-style graphics have been praised by many.

-

Tomb of the Mask is an arcade game, which takes place in an infinite procedurally generated vertical labyrinth. Seeking for adventure, you get into a tomb, where you find a strange mask. You put it on and suddenly realize that you can now climb walls - easily and promptly. And that's when all the fun begins.

-

tomb of the mask apk sin anuncios


DOWNLOAD ✪✪✪ https://gohhs.com/2uPpn2



-

However, as much as you enjoy playing this game, you might also get annoyed by the frequent ads that pop up on your screen. Ads can be distracting, irritating, and sometimes even malicious. They can ruin your gaming experience and waste your time and data. That's why some people prefer to download the apk version of Tomb of the Mask without ads, or tomb of the mask apk sin anuncios in Spanish.

-

But what is an apk file and how can you use it to play Tomb of the Mask without ads? In this article, we will explain everything you need to know about this option, as well as some other ways to remove ads from Android games.

-

How to Download and Install Tomb of the Mask Apk Sin Anuncios

-

An apk file is a package file format used by Android devices to distribute and install applications. It contains all the files and code needed to run an app on your phone or tablet. You can download apk files from various sources online, such as APKCombo or APKPure. However, you should always be careful when downloading apk files from unknown or untrusted sources, as they may contain viruses or malware that can harm your device.

-

To download and install Tomb of the Mask apk sin anuncios, you will need to follow these steps:

-
    -
  1. Enable unknown sources on your device. This will allow you to install apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
  2. -
  3. Download Tomb of the Mask apk sin anuncios from a reliable source. You can search for it online or use one of the links we provided above.
  4. -
  5. Locate the downloaded file on your device and tap on it to start the installation process. You may need to grant some permissions for the app to access your device's features. (If you prefer, you can also sideload the file from a computer; see the optional sketch after this list.)
  6. -
  7. Wait for the installation to finish and then launch the app. You should now be able to play Tomb of the Mask without ads.
  8. -
-
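
If you prefer to handle the installation step from a computer rather than tapping the file on your phone, you can sideload the apk over USB with adb. This is only an optional illustration and assumes you already have adb installed and USB debugging enabled on the device; the file name below is a placeholder for whatever your downloaded apk is actually called.

```bash
# Optional: sideload the downloaded apk from a computer (assumes adb + USB debugging).
# "tomb-of-the-mask.apk" is a placeholder for the actual file name you downloaded.
adb install -r tomb-of-the-mask.apk
```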

Some of the benefits of using an apk file to play Tomb of the Mask without ads are:

-

tomb of the mask apk mod sin anuncios
-tomb of the mask apk premium sin anuncios
-tomb of the mask apk full sin anuncios
-tomb of the mask apk pro sin anuncios
-tomb of the mask apk hack sin anuncios
-tomb of the mask apk descargar gratis sin anuncios
-tomb of the mask apk ultima version sin anuncios
-tomb of the mask apk mega sin anuncios
-tomb of the mask apk mediafire sin anuncios
-tomb of the mask apk android sin anuncios
-tomb of the mask apk para pc sin anuncios
-tomb of the mask apk online sin anuncios
-tomb of the mask apk offline sin anuncios
-tomb of the mask apk infinito sin anuncios
-tomb of the mask apk ilimitado sin anuncios
-tomb of the mask apk actualizado sin anuncios
-tomb of the mask apk 2023 sin anuncios
-tomb of the mask apk 2022 sin anuncios
-tomb of the mask apk 2021 sin anuncios
-tomb of the mask apk 2020 sin anuncios
-tomb of the mask apk 2019 sin anuncios
-tomb of the mask apk 2018 sin anuncios
-tomb of the mask apk 2017 sin anuncios
-tomb of the mask apk 2016 sin anuncios
-tomb of the mask apk 2015 sin anuncios
-tomb of the mask juego gratis sin anuncios
-tomb of the mask juego online sin anuncios
-tomb of the mask juego descargar sin anuncios
-tomb of the mask juego completo sin anuncios
-tomb of the mask juego hackeado sin anuncios
-descargar tomb of the mask para android sin anuncios
-descargar tomb of the mask para pc sin anuncios
-descargar tomb of the mask mod apk sin anuncios
-descargar tomb of the mask hackeado apk sin anuncios
-descargar tomb of the mask ultima version apk sin anuncios
-descargar tomb of the mask gratis para android sin anuncios
-descargar tomb of the mask gratis para pc sin anuncios
-como jugar a tomb of the mask sin anuncios
-como descargar a tomb of the mask sin anuncios
-como instalar a tomb of the mask sin anuncios
-como hackear a tomb of the mask sin anuncios
-como actualizar a tomb of the mask sin anuncios
-como eliminar los anuncios de tomb of the mask
-como quitar los anuncios de tomb of the mask
-como bloquear los anuncios de tomb of the mask
-como desactivar los anuncios de tomb of the mask
-como evitar los anuncios de tomb of the mask
-como saltar los anuncios de tomb of the mask
-como jugar a tomb of the mask con internet y sin publicidad

-
    -
  • You can save your data and battery life by not loading ads.
  • -
  • You can enjoy a smoother and faster gameplay without interruptions.
  • -
  • You can access some features or content that may not be available in your region or on Google Play Store.
  • -
-

However, there are also some risks involved in using an apk file, such as:

-
    -
  • You may expose your device to security threats or malware.
  • You may violate the terms and conditions of the game or the app store.
  • You may not receive updates or support from the developers of the game.
  • You may encounter bugs or errors that affect the game's performance or functionality.

Therefore, you should always weigh the pros and cons of using an apk file before deciding to download and install it. You should also back up your data and scan your device regularly for any potential threats.

    How to Play Tomb of the Mask Without Ads

    -

    If you don't want to use an apk file to play Tomb of the Mask without ads, there are some other options you can try. Here are some tips and tricks to enjoy this game more without ads:

    -
      -
    • Turn off your internet connection. This will prevent the game from loading any ads, but it will also disable some features, such as leaderboards, achievements, and daily quests.
    • -
    • Buy the premium version of the game. This will cost you $2.99, but it will remove all ads and give you some extra benefits, such as unlimited energy, 150 coins, and no cooldown for power-ups.
    • -
    • Use an ad blocker app. This will block ads from appearing on your device, but it may also interfere with some apps or websites that rely on ads for revenue. You should always check the compatibility and the reviews of the ad blocker app before installing it.
    • -
    -

    Another way to play Tomb of the Mask without ads is to look for some alternatives to this game that have similar gameplay and graphics, but less or no ads. Some examples are:

    | Game | Description | Price |
    | --- | --- | --- |
    | Super Dangerous Dungeons | A platformer game where you explore dungeons filled with traps and treasures. | Free with optional in-app purchases. |
    | Downwell | A roguelike game where you descend into a well armed with gunboots. | $2.99 with no ads or in-app purchases. |
    | Slayin 2 | An endless action RPG where you slay monsters and bosses with different characters and weapons. | $5.99 with no ads or in-app purchases. |
    -

    Conclusion

    -

    Tomb of the Mask is a fun and addictive arcade game that will keep you entertained for hours. However, if you want to play it without ads, you have several options to choose from. You can download and install Tomb of the Mask apk sin anuncios, which is a modified version of the game that removes all ads. You can also turn off your internet connection, buy the premium version of the game, use an ad blocker app, or look for some alternative games that have less or no ads.

    -

    Whatever option you choose, make sure you are aware of the benefits and the risks involved. Always download apk files from trusted sources, backup your data, scan your device, and respect the developers' rights. And most importantly, have fun playing Tomb of the Mask!

    -

    FAQs

    -

    Here are some frequently asked questions about Tomb of the Mask:

    -

    What is the difference between Tomb of the Mask and Tomb of the Mask: Color?

    -

    Tomb of the Mask: Color is a spin-off of Tomb of the Mask that adds a new twist to the gameplay. In this game, you have to collect colored dots that match your mask's color while avoiding dots that don't. The game also features new masks, power-ups, enemies, and levels.

    -

    How many levels are there in Tomb of the Mask?

    -

    Tomb of the Mask has two modes: Adventure and Arcade. In Adventure mode, there are 200 levels divided into 10 chapters. In Arcade mode, there are infinite levels that get harder as you progress.

    -

    How can I unlock more masks and power-ups in Tomb of the Mask?

    -

    You can unlock more masks and power-ups by collecting coins and gems in the game. You can also watch ads or buy them with real money. Each mask and power-up has a different effect on your gameplay, such as speed boost, magnetism, invincibility, etc.

    -

    Is Tomb of the Mask safe for kids to play?

    -

    Tomb of the Mask is rated 4+ on Google Play Store and 9+ on App Store. The game does not contain any violence, blood, gore, or profanity. However, it does have some ads that may be inappropriate or annoying for some kids; you can remove the ads by using one of the methods we discussed above. It also has some in-app purchases that may tempt some kids to spend real money. You can disable in-app purchases by going to Settings > Google Play > User Controls > Require Authentication for Purchases and choosing a suitable option.

    How can I contact the developers of Tomb of the Mask?

    -

    If you have any questions, feedback, or issues with Tomb of the Mask, you can contact the developers by emailing them at support@playgendary.com. You can also visit their website at https://playgendary.com/ or follow them on social media platforms such as Facebook, Twitter, Instagram, and YouTube.

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Euro Truck Simulator 2 Indonesia Lengkap Mod Cara Instal dan Update ETS2 Indonesia.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Euro Truck Simulator 2 Indonesia Lengkap Mod Cara Instal dan Update ETS2 Indonesia.md deleted file mode 100644 index 4caa700bb2315f188175c119c310873b02e1f639..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Euro Truck Simulator 2 Indonesia Lengkap Mod Cara Instal dan Update ETS2 Indonesia.md +++ /dev/null @@ -1,150 +0,0 @@ - -

    Download Euro Truck Simulator 2 Indonesia Lengkap + Mod

    -

    If you are a fan of driving simulation games, you might have heard of Euro Truck Simulator 2, one of the most popular and realistic truck driving games ever made. But did you know that you can also enjoy this game with an Indonesian twist? And that you can enhance your gaming experience with various mods that add new features, vehicles, maps, and more? In this article, we will show you how to download Euro Truck Simulator 2 Indonesia lengkap + mod, which means complete with mod in Indonesian. Read on to find out more!

    -

    What is Euro Truck Simulator 2?

    -

    Euro Truck Simulator 2 is a game developed by SCS Software that lets you drive trucks across Europe, delivering cargo and exploring different cities and countries. You can customize your truck, upgrade your skills, manage your business, and enjoy the realistic graphics and physics of the game. You can also play online with other players, join convoys, and chat with them. Euro Truck Simulator 2 was released in 2012 and has received many updates and expansions since then. It is available for Windows, Mac, and Linux platforms.

    -

    download euro truck simulator 2 indonesia lengkap + mod


    DOWNLOAD ★★★ https://gohhs.com/2uPrJb



    -

    Features of Euro Truck Simulator 2

    -

    Some of the features of Euro Truck Simulator 2 are:

    -
      -
    • Over 70 European cities to visit, including London, Paris, Berlin, Rome, and more.
    • -
    • Thousands of kilometers of roads to drive on, with realistic landscapes and landmarks.
    • -
    • Over 40 licensed truck brands and models to choose from, such as Volvo, Scania, Mercedes-Benz, and more.
    • -
    • Hundreds of types of cargo to transport, from food and chemicals to cars and machinery.
    • -
    • A dynamic weather system that affects the driving conditions and visibility.
    • -
    • A day and night cycle that changes the scenery and traffic.
    • -
    • A radio feature that lets you listen to real or online radio stations while driving.
    • -
    • A photo mode that lets you capture and share your best moments.
    • -
    -

    How to download Euro Truck Simulator 2

    -

    To download Euro Truck Simulator 2, you have several options:

    -
      -
    • You can buy the game from the official website, where you can also find the latest news and updates about the game.
    • -
    • You can buy the game from Steam, where you can also access the Steam Workshop, which is a platform for sharing and downloading user-made mods for the game.
    • -
    • You can download a free trial version of the game from the official website, which lets you play for one hour with limited features.
    • -
    -

    What is Euro Truck Simulator 2 Indonesia?

    -

    Euro Truck Simulator 2 Indonesia is a mod that adds Indonesian elements to the game, such as trucks, trailers, traffic, signs, maps, and more. It is created by a community of Indonesian modders who want to share their passion for their country and culture with other players. It is compatible with the latest version of the game and does not require any additional DLCs.

    -

    Features of Euro Truck Simulator 2 Indonesia

    -

    Some of the features of Euro Truck Simulator 2 Indonesia are:

    -

    download ets2 indonesia full version + mod
    -download euro truck simulator 2 indonesia mod apk
    -download euro truck simulator 2 indonesia map + bus mod
    -download euro truck simulator 2 indonesia mod traffic
    -download euro truck simulator 2 indonesia mod sound
    -download euro truck simulator 2 indonesia mod grafik
    -download euro truck simulator 2 indonesia mod keren
    -download euro truck simulator 2 indonesia mod terbaru
    -download euro truck simulator 2 indonesia mod jetbus
    -download euro truck simulator 2 indonesia mod livery
    -download euro truck simulator 2 indonesia mod skin
    -download euro truck simulator 2 indonesia mod mobil
    -download euro truck simulator 2 indonesia mod trailer
    -download euro truck simulator 2 indonesia mod cargo
    -download euro truck simulator 2 indonesia mod money
    -download euro truck simulator 2 indonesia mod online
    -download euro truck simulator 2 indonesia mod multiplayer
    -download euro truck simulator 2 indonesia mod polisi
    -download euro truck simulator 2 indonesia mod lampu strobo
    -download euro truck simulator 2 indonesia mod klakson telolet
    -download euro truck simulator 2 indonesia mod bus shd
    -download euro truck simulator 2 indonesia mod bus sdd
    -download euro truck simulator 2 indonesia mod bus jb3
    -download euro truck simulator 2 indonesia mod bus scorpion x
    -download euro truck simulator 2 indonesia mod bus hino rk8
    -download euro truck simulator 2 indonesia mod bus scania k360
    -download euro truck simulator 2 indonesia mod bus mercedes benz oh1626
    -download euro truck simulator 2 indonesia mod bus legacy sr1 sph
    -download euro truck simulator 2 indonesia mod bus adiputro jetbus hd3+
    -download euro truck simulator 2 indonesia mod bus laksana sr2 xhd prime
    -download euro truck simulator 2 indonesia mod bus po haryanto shd ep3 mh edit jetliner by armand
    -download euro truck simulator 2 indonesia mod bus po harapan jaya shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod bus po efisiensi shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod bus po nusantara shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod bus po rosalia indah shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod bus po handoyo shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod bus po lorena shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod bus po sugeng rahayu shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod bus po santoso shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod bus po sinar jaya shd ep3 mh edit by armand
    -download euro truck simulator 2 indonesia mod map sumatra v1.0 by safarul ilham
    -download euro truck simulator 2 indonesia mod map jowo v7.5 by donal juragan ballon
    -download euro truck simulator 2 indonesia mod map pantura road edition v1.0 by dito project art
    -download euro truck simulator 2 indonesia mod map icrf wanderer v4.0 by evergreen1976
    -download euro truck simulator 2 indonesia mod map trans java extreme v1.0 by rizky ardian
    -download euro truck simulator 2 indonesia mod map trans sulawesi v1.0 by septian mr
    -download euro truck simulator 2 indonesia mod map bali v1.0 by gilang wisnu
    -download euro truck simulator 2 indonesia mod map kalimantan v1.0 by adie evergreen1976
    -download euro truck simulator 2 indonesia mod map madura v1.0 by bimo wahyu saputro

    -
      -
    • A new map that covers the islands of Java, Sumatra, Bali, and more.
    • -
    • New cities and towns to explore, such as Jakarta, Bandung, Surabaya, Denpasar, and more.
    • -
    • New roads and bridges to drive on, with realistic landscapes and landmarks.
    • -
    • New trucks and trailers that are commonly used in Indonesia, such as Hino, Mitsubishi, Isuzu, and more.
    • -
    • New traffic vehicles and pedestrians that reflect the Indonesian culture and lifestyle.
    • -
    • New signs and symbols that are written in Indonesian language and script.
    • -
    • New sounds and music that are inspired by the Indonesian traditional and modern genres.
    • -
    -

    How to download Euro Truck Simulator 2 Indonesia

    -

    To download Euro Truck Simulator 2 Indonesia, you have to follow these steps:

    -
      -
    1. Go to the official website of Euro Truck Simulator 2 Indonesia, where you can find the latest version of the mod and the installation instructions.
    2. -
    3. Download the mod file, which is a zip archive that contains several folders and files.
    4. -
    5. Extract the mod file to your Euro Truck Simulator 2 folder, which is usually located in C:\Users\YourName\Documents\Euro Truck Simulator 2.
    6. -
    7. Run the game and go to the mod manager, which is accessible from the profile selection screen.
    8. -
    9. Enable the mod by clicking on its name and moving it to the right side of the screen.
    10. -
    11. Start a new profile or load an existing one, and enjoy the game with the Indonesian mod!
    12. -
    -

    What are mods for Euro Truck Simulator 2?

    -

    Mods are modifications that change or add new features, content, or gameplay to the game. They are created by fans or developers who want to improve or customize the game according to their preferences or needs. Mods can range from simple tweaks to complex overhauls, and they can be compatible or incompatible with each other. Mods can be downloaded from various sources, such as the official website, Steam Workshop, or other websites. Mods can be installed manually or automatically, depending on their format and instructions.

    -

    Benefits of using mods for Euro Truck Simulator 2

    -

    Some of the benefits of using mods for Euro Truck Simulator 2 are:

    -
      -
    • You can enhance your gaming experience by adding new features, content, or gameplay that are not available in the base game or DLCs.
    • -
    • You can customize your game according to your personal taste, style, or mood.
    • -
    • You can explore new regions, countries, or continents that are not included in the official map.
    • -
    • You can drive new vehicles, trailers, or cargoes that are not offered in the game.
    • -
    • You can improve the graphics, sounds, or performance of the game.
    • -
    -

    How to download and install mods for Euro Truck Simulator 2

    -

    To download and install mods for Euro Truck Simulator 2, you have to follow these general steps:

    -
      -
    1. Find a mod that you like from a reliable source, such as the official website, Steam Workshop, or other websites. Make sure that the mod is compatible with your game version and DLCs.
    2. -
    3. Download the mod file, which can be a zip archive, an scs file, or an exe file.
    4. -
    5. If the mod file is a zip archive, extract it to your Euro Truck Simulator 2 folder, which is usually located in C:\Users\YourName\Documents\Euro Truck Simulator 2. If the mod file is an scs file, copy it to your Euro Truck Simulator 2\mod folder. If the mod file is an exe file, run it and follow the instructions. (A sketch of the resulting folder layout appears after this list.)
    6. -
    7. Run the game and go to the mod manager, which is accessible from the profile selection screen.
    8. -
    9. Enable the mod by clicking on its name and moving it to the right side of the screen. You can also change the priority of the mod by dragging it up or down. Some mods may require a specific order to work properly.
    10. -
    11. Start a new profile or load an existing one, and enjoy the game with the mod!
    12. -
    -
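
    As a rough sketch of where the files from the extraction step end up, the mod folder sits alongside your profiles and config.cfg inside the game's documents folder. The mod file name below is only an example; use whatever the mod you downloaded is actually called.

```text
C:\Users\YourName\Documents\Euro Truck Simulator 2\
├── config.cfg
├── profiles\
└── mod\
    └── example_mod.scs    (scs mod files are copied here)
```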

    Tandem Traffic Pack mod

    -

    One of the mods that you can use for Euro Truck Simulator 2 is the Tandem Traffic Pack mod, which adds tandem trucks to the traffic. Tandem trucks are trucks that have two trailers attached to them, making them longer and heavier than normal trucks. This mod makes the traffic more diverse and realistic, as well as more challenging to drive around. The mod includes several tandem truck models from different brands and countries, such as Volvo FH16 Globetrotter (Sweden), Scania R730 Topline (Norway), Mercedes-Benz Actros MP4 (Germany), and more. The mod also works with other traffic mods, such as Jazzycat's Traffic Pack or TrafficManiac's Traffic Pack. You can download the Tandem Traffic Pack mod from the official website or from Steam Workshop. To install the mod, follow the steps mentioned above.

    -

    Indonesia map mod

    -

    Another mod that you can use for Euro Truck Simulator 2 is the Indonesia map mod, which adds a new map that covers the islands of Indonesia. This mod is different from the Euro Truck Simulator 2 Indonesia mod, which only modifies the existing map. The Indonesia map mod is a standalone map that requires a new profile and does not work with other map mods. The mod includes many cities and towns, such as Medan, Palembang, Jakarta, Bandung, Surabaya, Denpasar, and more. The mod also features realistic landscapes, landmarks, roads, bridges, traffic, signs, and more. The mod is created by Septian MR and is updated regularly. You can download the Indonesia map mod from the official website or from other websites. To install the mod, follow the steps mentioned above, but make sure to select the indonesia.mbd module when creating a new profile.

    -

    Other popular mods for Euro Truck Simulator 2

    -

    There are many other mods that you can use for Euro Truck Simulator 2, depending on your preferences and needs. Some of the most popular mods are:

    -
      -
    • Realistic Graphics Mod, which improves the graphics quality and performance of the game.
    • -
    • Realistic Physics Mod, which enhances the physics and handling of the trucks and trailers.
    • -
    • Realistic Fuel Consumption Mod, which makes the fuel consumption more realistic and challenging.
    • -
    • Realistic Weather Mod, which adds new weather effects and sounds to the game.
    • -
    • Realistic Traffic Mod, which increases the density and variety of traffic in the game.
    • -
    • Realistic AI Traffic Mod, which makes the traffic behavior more realistic and dynamic.
    • -
    • Realistic Sounds Mod, which adds new sounds for engines, horns, tires, environment, and more.
    • -
    • Realistic Lights Mod, which improves the lighting and visibility of the game.
    • -
    • Promods Map Expansion, which adds new regions, countries, and roads to the game.
    • -
    • Rusmap Map Expansion, which adds new regions, countries, and roads to the game.
    • -
    • TruckersMP Multiplayer Mod, which lets you play online with other players in a massive multiplayer environment.
    • -
    -

    Conclusion

    -

    Euro Truck Simulator 2 is a great game that lets you drive trucks across Europe, delivering cargo and exploring different cities and countries. You can also enjoy this game with an Indonesian twist by using the Euro Truck Simulator 2 Indonesia mod, which adds Indonesian elements to the game. You can also enhance your gaming experience by using various mods that add new features, content, or gameplay to the game. You can download and install these mods from different sources, such as the official website, Steam Workshop, or other websites. You can also create your own mods or share them with other players. Euro Truck Simulator 2 is a game that offers endless possibilities and fun for truck driving enthusiasts. Download it now and start your journey!

    -

    FAQs

    -

    Here are some frequently asked questions about Euro Truck Simulator 2:

    -
      -
    1. What are the system requirements for Euro Truck Simulator 2?
      The minimum system requirements for Euro Truck Simulator 2 are: Windows 7/8/10 64-bit; Dual core CPU 2.4 GHz; 4 GB RAM; GeForce GTS 450-class (Intel HD 4000); 3 GB available space. The recommended system requirements for Euro Truck Simulator 2 are: Windows 7/8/10 64-bit; Quad core CPU 3.0 GHz; 6 GB RAM; GeForce GTX 760-class (2 GB); 3 GB available space.
    2. -
    3. How to update Euro Truck Simulator 2?
      To update Euro Truck Simulator 2, you have to run the game launcher and click on the update button. If you bought the game from Steam, it will update automatically when you launch it. If you downloaded a free trial version of the game from the official website, you have to download a new version from there.
    4. -
    5. How to activate cheats in Euro Truck Simulator 2?
      To activate cheats in Euro Truck Simulator 2, you have to edit some files in your Euro Truck Simulator 2 folder, which is usually located in C:\Users\YourName\Documents\Euro Truck Simulator 2. You have to open the config.cfg file with a text editor and change the values of some variables, such as uset g_console, uset g_developer, uset g_money, uset g_xp, and more. You can also use the console commands in the game by pressing the ~ key and typing the commands, such as g_set_time, g_set_weather, g_flyspeed, and more. However, using cheats may affect your achievements and statistics, so use them at your own risk. (A short example of the config.cfg lines is shown after this FAQ.)
    6. -
    7. How to use mods in multiplayer mode in Euro Truck Simulator 2?
      To use mods in multiplayer mode in Euro Truck Simulator 2, you have to download and install the TruckersMP mod, which is a multiplayer mod that lets you play online with other players in a massive multiplayer environment. You can also use some of the mods that are compatible with TruckersMP, such as Promods Map Expansion, Rusmap Map Expansion, Realistic Graphics Mod, and more. However, not all mods are allowed or supported by TruckersMP, so you have to check their rules and guidelines before using them.
    8. -
    9. How to create your own mods for Euro Truck Simulator 2?
      To create your own mods for Euro Truck Simulator 2, you have to use some tools and resources that are provided by SCS Software or other modders. Some of the tools and resources are: SCS Workshop Uploader, which lets you upload your mods to Steam Workshop; SCS Blender Tools, which lets you create and edit 3D models for the game; SCS Modding Wiki, which provides tutorials and documentation for modding; SCS Forum, which is a platform for discussing and sharing mods with other modders; and more. You can find these tools and resources on the official website or on other websites.
    10. -
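
    To make the cheat answer above a little more concrete, here is a minimal example of the config.cfg edit it describes. Setting g_developer and g_console to "1" enables developer mode and the in-game console (opened with the ~ key); the other variables and commands mentioned in the answer, such as g_set_time or g_flyspeed, are typed into that console, and whether each one is honored can vary between game versions, so treat this as an illustration rather than a guaranteed recipe.

```text
uset g_developer "1"
uset g_console "1"
```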

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download FIFA 09 APK Data for Android The Ultimate Soccer Simulation Game.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download FIFA 09 APK Data for Android The Ultimate Soccer Simulation Game.md deleted file mode 100644 index 73472d527fd9e9a165d768fe69853fc1e68ef95c..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download FIFA 09 APK Data for Android The Ultimate Soccer Simulation Game.md +++ /dev/null @@ -1,69 +0,0 @@ - -

    Download FIFA 09 APK+Data: How to Enjoy the Classic Soccer Game on Your Android Device

    -

    If you are a fan of soccer games, you might have heard of FIFA 09, one of the most popular and acclaimed titles in the EA Sports series. Released in 2008, FIFA 09 features more than 250 gameplay improvements, realistic graphics, and various modes that let you experience the thrill of soccer on your console or PC. But did you know that you can also play FIFA 09 on your Android device?

    -

    Yes, you read that right. With FIFA 09 APK+Data, you can download and install the game on your Android device and enjoy it anytime, anywhere. In this article, we will show you how to do that, as well as the features and benefits of playing FIFA 09 on your Android device. Let's get started!

    -

    download fifa 09 apk+data


    Download ->>> https://gohhs.com/2uPsQA



    -

    Features of FIFA 09 APK+Data

    -

    FIFA 09 APK+Data is a modified version of the original game that allows you to run it on your Android device without any compatibility issues. It has all the features and content of the original game, plus some additional enhancements that make it more suitable for mobile gaming. Here are some of the features of FIFA 09 APK+Data:

    -

    Gameplay

    -

    FIFA 09 APK+Data offers smooth and responsive gameplay that lets you control your players with ease. You can use the touch screen or a virtual joystick to move your players, pass, shoot, tackle, and perform other actions. You can also customize the controls according to your preference. The game also features an intelligent AI that adapts to your skill level and provides a realistic challenge.

    -

    Graphics

    -

    FIFA 09 APK+Data delivers stunning graphics that exploit the power of your Android device. The game features photorealistic player models, detailed stadiums, realistic weather effects, and dynamic lighting and shadows. The game also runs at a high frame rate and supports HD resolution for a smooth and immersive visual experience.

    -

    Modes

    -

    FIFA 09 APK+Data has a variety of modes and challenges that keep you entertained for hours. You can play a quick match, a tournament, a league, or a season with your favorite team. You can also play the Be A Pro mode, where you can create your own player and develop his skills and career over four seasons. You can also play online with other players around the world in the 10 vs. 10 mode, where you can control one player on the pitch.

    -

    Customization

    -

    FIFA 09 APK+Data lets you create your own team and player with a wide range of options. You can choose from over 500 teams and over 15,000 players from around the world. You can also edit their names, appearances, attributes, kits, formations, tactics, and more. You can also create your own custom tournaments and leagues with your own rules and settings. You can also download the latest rosters and updates from the internet to keep your game up to date.

    -

    How to Download and Install FIFA 09 APK+Data

    -

    Now that you know the features of FIFA 09 APK+Data, you might be wondering how to download and install it on your Android device. Well, it's not that hard, but you need to follow some steps carefully to avoid any errors or issues. Here are the steps to download and install FIFA 09 APK+Data:

    -

    Requirements

    -

    Before you download and install FIFA 09 APK+Data, you need to make sure that your Android device meets the minimum and recommended specifications for the game. Here are the requirements for FIFA 09 APK+Data:

    | Minimum | Recommended |
    | --- | --- |
    | Android version: 4.0 or higher | Android version: 5.0 or higher |
    | RAM: 1 GB or more | RAM: 2 GB or more |
    | Storage: 2 GB or more free space | Storage: 4 GB or more free space |
    | Processor: Dual-core or higher | Processor: Quad-core or higher |
    | Graphics: Adreno, Mali, PowerVR, or Tegra GPU | Graphics: Adreno, Mali, PowerVR, or Tegra GPU |
    | Internet connection: Required for online mode and updates | Internet connection: Required for online mode and updates |
              
          
          
              -
          

    If your device meets these requirements, you can proceed to the next step. If not, you might encounter some problems while running the game, such as lagging, crashing, or freezing.
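              If you have a computer nearby, one quick way to check a few of these specs on a connected device is to query them over adb (Android Debug Bridge). The snippet below is only an illustrative sketch and not part of the required steps; it assumes adb is installed on the computer and USB debugging is enabled on the phone.
          
              ```python
              import subprocess
          
              def adb_shell(command: str) -> str:
                  # Run a shell command on the connected device and return its output.
                  result = subprocess.run(
                      ["adb", "shell", command], capture_output=True, text=True, check=True
                  )
                  return result.stdout.strip()
          
              # Android version, e.g. "5.1"
              android_version = adb_shell("getprop ro.build.version.release")
          
              # Total RAM in kB; the first line of /proc/meminfo looks like "MemTotal: 2048000 kB"
              mem_total_kb = int(adb_shell("cat /proc/meminfo").splitlines()[0].split()[1])
          
              print(f"Android version: {android_version}")
              print(f"RAM: {mem_total_kb / 1024 / 1024:.1f} GB")
              print(adb_shell("df -h /sdcard"))  # free space on the shared storage partition
              ```
          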

    -

    [FIFA 09 APK + OBB Data Download for Android]
    -[FIFA 09 APK + Data Obb By EA Sports - RisTechy]
    -[FIFA 09 APK + OBB Download For Android (Offline)]

    -

    Steps

    -

    The next step is to download and install FIFA 09 APK+Data from a reliable source. There are many websites that offer the game for free, but not all of them are safe and trustworthy. Some of them might contain malware, viruses, or fake files that can harm your device or steal your data. Therefore, you need to be careful and choose a reputable website that has positive reviews and ratings from other users.

    -

    One of the websites that we recommend is [FIFA 09 APK+Data Download], which provides the game in a compressed zip file that contains both the APK and the data files. You can download the zip file from this website by clicking on the download button and following the instructions. The file size is about 1.5 GB, so make sure you have enough space and a stable internet connection before downloading it.

    -

              After downloading the zip file, you need to extract it using a file manager app such as [ZArchiver], which you can download from the Google Play Store for free. Locate the zip file in your device's storage and tap on it to open it with ZArchiver, then select the "Extract here" or "Extract to" option and choose a destination folder where you want to extract the files.
          

    -

    The extraction process might take some time depending on your device's performance and the file size. Once it is done, you will see two folders named "com.ea.fifa09" and "FIFA_09_APK". The first folder contains the data files of the game, which you need to move or copy to the "Android/obb" folder in your device's storage. The second folder contains the APK file of the game, which you need to install by tapping on it and allowing the permissions.
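              If you would rather do this part from a computer, the same two steps (copying the data folder into Android/obb and installing the APK) can also be done over adb. This is only a rough sketch under a couple of assumptions: adb is available, the extracted folders sit in your current directory, and the APK file name used below is a placeholder, so adjust it to whatever you actually see inside the FIFA_09_APK folder.
          
              ```python
              import subprocess
          
              # Copy the extracted game data folder into the shared obb directory on the device.
              subprocess.run(
                  ["adb", "push", "com.ea.fifa09", "/sdcard/Android/obb/"],
                  check=True,
              )
          
              # Install the APK that was extracted alongside the data folder.
              # "FIFA_09.apk" is a placeholder; use the real file name from the FIFA_09_APK folder.
              subprocess.run(["adb", "install", "FIFA_09_APK/FIFA_09.apk"], check=True)
              ```
          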

    -

    Tips

    -

    The final step is to launch the game and enjoy it on your Android device. However, before you do that, here are some tips that can help you avoid errors and issues while installing FIFA 09 APK+Data:

    -
      -
    • Make sure you have enough storage space on your device before downloading and extracting the zip file.
    • -
    • Make sure you have a stable internet connection while downloading and updating the game.
    • -
    • Make sure you disable any antivirus or security apps that might interfere with the installation process.
    • -
    • Make sure you follow the steps correctly and do not skip any of them.
    • -
    • If you encounter any problems while running the game, such as black screen, force close, or license verification error, try clearing the cache and data of the game from the settings app or reinstalling it.
    • -
    • If you have any questions or feedback about the game, you can contact the developer of the game through their email or website.
    • -
    -

    Conclusion

    -

    FIFA 09 APK+Data is a great way to enjoy the classic soccer game on your Android device. It has all the features and content of the original game, plus some enhancements that make it more suitable for mobile gaming. You can play the game offline or online, with your favorite team or player, and in various modes and challenges. You can also customize the game according to your preference and download the latest updates from the internet.

    -

    To download and install FIFA 09 APK+Data, you need to follow some steps carefully and make sure your device meets the requirements for the game. You also need to avoid any errors or issues that might occur while installing or running the game. If you follow the steps and tips that we provided in this article, you should be able to play FIFA 09 APK+Data without any problems.

    -

    So, what are you waiting for? Download FIFA 09 APK+Data now and experience the thrill of soccer on your Android device!

    -

    FAQs

    -

    Q1: Is FIFA 09 APK+Data safe and legal to download?

    -

    A1: FIFA 09 APK+Data is safe to download as long as you get it from a reliable source that does not contain any malware, viruses, or fake files. However, it is not legal to download FIFA 09 APK+Data as it is a modified version of the original game that violates the copyright and license of EA Sports. Therefore, we do not endorse or recommend downloading FIFA 09 APK+Data and we are not responsible for any consequences that might arise from doing so.

    -

    Q2: How much space does FIFA 09 APK+Data take on your Android device?

    -

    A2: FIFA 09 APK+Data takes about 2 GB of space on your Android device after extraction. However, you need to have at least 4 GB of free space before downloading and extracting the zip file.

    -

    Q3: Can you play FIFA 09 APK+Data online with other players?

    -

    A3: Yes, you can play FIFA 09 APK+Data online with other players in the 10 vs. 10 mode, where you can control one player on the pitch. However, you need to have a stable internet connection and a compatible device to play online.

    -

    Q4: How can you update FIFA 09 APK+Data to the latest version?

    -

    A4: You can update FIFA 09 APK+Data to the latest version by downloading the latest zip file from the same source that you got the game from and following the same steps that we provided in this article. You can also check for updates from within the game by going to the settings menu and tapping on the update button.

    -

    Q5: What are some alternatives to FIFA 09 APK+Data for Android?

    -

    A5: Some alternatives to FIFA 09 APK+Data for Android are:

    -
      -
    • [FIFA Mobile]: This is the official mobile version of FIFA by EA Sports that features over 700 teams, 17,000 players, and various modes and events. It is free to download and play, but it requires an internet connection and has in-app purchases.
    • -
    • [Dream League Soccer]: This is a popular soccer game by First Touch Games that lets you create your own team, recruit players, compete in leagues and tournaments, and customize your stadium. It is free to download and play, but it has ads and in-app purchases.
    • -
    • [PES 2021]: This is another popular soccer game by Konami that features over 8000 players, realistic graphics, and various modes and challenges. It is free to download and play, but it has ads and in-app purchases.
    • -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download GoTube APK and Discover New Artists Albums and Tracks.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download GoTube APK and Discover New Artists Albums and Tracks.md deleted file mode 100644 index 5ffed52119592c07841e510f60986edc2cf7aedf..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download GoTube APK and Discover New Artists Albums and Tracks.md +++ /dev/null @@ -1,101 +0,0 @@ - -

    GoTube APK: A Free Video Player App for Android

    -

    If you are looking for a video player app that can play videos in a popup window, stream music endlessly, and offer many other features, then you should check out GoTube APK. GoTube APK is an Android app that allows you to watch videos from YouTube and other platforms in a convenient and customizable way. In this article, we will tell you what GoTube APK is, what features it offers, and how to download and install it on your device.

    -

    What is GoTube APK?

    -

              GoTube APK is an Android app that lets you watch videos from YouTube and other platforms in a popup window. You can also stream music, search for videos, channels and playlists, access and manage your channel, switch video quality, add videos to playlists without login, and more. GoTube APK is not available on the Google Play Store, but you can download it from a trusted source like [APKCombo].
          

    -

    go tube apk free download


    Download Filehttps://gohhs.com/2uPnBa



    -

    Features of GoTube APK

    -

    GoTube APK offers many features that make it a great video player app for Android. Here are some of them:

    -

    Collection of trending videos

    -

    GoTube APK shows you a collection of trending videos from various categories and genres. You can browse through them and watch what interests you.

    -

    Add videos to playlists without login

    -

    You don't need to log in to YouTube or any other platform to add videos to playlists. You can create your own playlists and add videos to them with one click.

    -

    Switch video quality

    -

    You can switch the video quality according to your preference and network speed. You can choose from 240p, 360p, 720p, or 1080p.

    -

    Endless music streaming

    -

    You can stream music from YouTube or other platforms without interruption. You can also enable closed captioning for the lyrics.

    -

    Closed captioning

    -

    You can turn on closed captioning for any video that supports it. You can also change the font size, color, and background of the captions.

    -

    go tube apk free download latest version
    -go tube apk free download for android
    -go tube apk free download 2023
    -go tube apk free download no ads
    -go tube apk free download xapk
    -go tube apk free download premium
    -go tube apk free download gotube studio
    -go tube apk free download offline
    -go tube apk free download mod
    -go tube apk free download cracked
    -go tube video player app free download apk
    -go tube music streaming app free download apk
    -go tube popup video app free download apk
    -go tube adblock app free download apk
    -go tube night mode app free download apk
    -how to install go tube apk free download
    -how to use go tube apk free download
    -how to update go tube apk free download
    -how to uninstall go tube apk free download
    -how to get go tube apk free download pro
    -is go tube apk free download safe
    -is go tube apk free download legal
    -is go tube apk free download legit
    -is go tube apk free download virus-free
    -is go tube apk free download malware-free
    -reviews of go tube apk free download
    -ratings of go tube apk free download
    -features of go tube apk free download
    -benefits of go tube apk free download
    -drawbacks of go tube apk free download
    -alternatives to go tube apk free download
    -competitors of go tube apk free download
    -comparison of go tube apk free download and other apps
    -pros and cons of go tube apk free download
    -tips and tricks for go tube apk free download
    -best practices for go tube apk free download
    -troubleshooting for go tube apk free download
    -faqs for go tube apk free download
    -support for go tube apk free download
    -contact for go tube apk free download
    -privacy policy for go tube apk free download [^3^]
    -terms of service for go tube apk free download [^2^]
    -website for go tube apk free download [^1^]
    -blog for go tube apk free download [^1^]
    -forum for go tube apk free download [^1^]
    -social media for go tube apk free download [^1^]
    -youtube channel for go tube apk free download [^1^]
    -testimonials for go tube apk free download [^1^]
    -coupons for go tube apk free download [^1^]

    -

    Search videos, channels and playlists

    -

    You can search for any video, channel or playlist that you want to watch. You can also filter the results by relevance, date, rating, or view count.

    -

    Access and manage your channel

    -

    You can access and manage your YouTube channel from GoTube APK. You can upload videos, edit your profile, check your analytics, respond to comments, and more.

    -

    Night mode

    -

    You can enable night mode to reduce eye strain and improve sleep quality. Night mode changes the background color to black and the text color to white.

    -

    Sleep timer

    -

    You can set a sleep timer to stop the video playback after a specific time. This saves data usage and battery life.

    -

    How to download and install GoTube APK?

    -

    If you want to download and install GoTube APK on your device, follow these steps:

    -

    Download GoTube APK from a trusted source

    -

              The first step is to download GoTube APK from a trusted source like [APKCombo]. You can use the link below to download the latest version of GoTube APK:
          

              - [Download GoTube APK]
          

    Make sure you have enough storage space on your device before downloading the file.
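              One extra precaution after downloading: if the page you used publishes a checksum for the file, you can verify that your copy arrived intact before installing it. The snippet below is a generic sketch, not something GoTube itself provides; the file name and the expected hash are placeholders that you would replace with the real values from the download page.
          
              ```python
              import hashlib
          
              APK_PATH = "gotube.apk"  # placeholder name for the downloaded file
              EXPECTED_SHA256 = "paste-the-published-hash-here"  # from the download page, if provided
          
              sha256 = hashlib.sha256()
              with open(APK_PATH, "rb") as f:
                  # Read the file in chunks so large downloads do not have to fit in memory at once.
                  for chunk in iter(lambda: f.read(1024 * 1024), b""):
                      sha256.update(chunk)
          
              print("computed:", sha256.hexdigest())
              print("match:", sha256.hexdigest() == EXPECTED_SHA256.lower())
              ```
          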

    -

    Enable unknown sources on your device

    -

    The next step is to enable unknown sources on your device. This is necessary to install apps that are not from the Google Play Store. To do this, go to your device settings and look for security or privacy options. Then, find the option that says unknown sources or allow installation of apps from unknown sources. Turn it on and confirm your choice.

    -

    Install GoTube APK and enjoy

    -

    The final step is to install GoTube APK and enjoy watching videos in a popup window. To do this, locate the downloaded file on your device and tap on it. Then, follow the instructions on the screen to complete the installation. Once the installation is done, you can launch GoTube APK and start watching videos from YouTube and other platforms.

    -

    Conclusion

    -

    GoTube APK is a free video player app for Android that lets you watch videos in a popup window, stream music endlessly, and offer many other features. It is not available on the Google Play Store, but you can download it from a trusted source like [APKCombo]. To install it, you need to enable unknown sources on your device and follow the steps above. GoTube APK is a great app for video lovers who want to enjoy videos in a convenient and customizable way.

    -

    Here are some FAQs about GoTube APK:

    -
      -
    • Q: Is GoTube APK safe to use?
    • -
    • A: GoTube APK is safe to use as long as you download it from a trusted source like [APKCombo]. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware.
    • -
    • Q: Does GoTube APK require root access?
    • -
    • A: No, GoTube APK does not require root access to work. You can install it on any Android device without rooting it.
    • -
    • Q: Does GoTube APK support Chromecast?
    • -
    • A: Yes, GoTube APK supports Chromecast. You can cast videos from GoTube APK to your TV or other devices that support Chromecast.
    • -
    • Q: Can I download videos from GoTube APK?
    • -
    • A: No, GoTube APK does not allow you to download videos from YouTube or other platforms. You can only watch them online in a popup window.
    • -
    • Q: How can I update GoTube APK?
    • -
    • A: You can update GoTube APK by downloading the latest version from [APKCombo] and installing it over the existing one. You can also check for updates within the app settings.
    • -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Inazuma Eleven GO Strikers 2013 for PC with Mod Apk Features.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Inazuma Eleven GO Strikers 2013 for PC with Mod Apk Features.md deleted file mode 100644 index f0fd48934a399462cd6654f34476570cc73dfa7e..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Inazuma Eleven GO Strikers 2013 for PC with Mod Apk Features.md +++ /dev/null @@ -1,83 +0,0 @@ - -

    Inazuma Eleven GO Strikers 2013 Download Mod APK: How to Play the Ultimate Soccer Game on Your Android Device

    -

    If you are a fan of soccer games and anime, you might have heard of Inazuma Eleven, a popular franchise that combines both elements. The franchise has spawned several games, anime series, movies, and manga. One of the most recent games in the series is Inazuma Eleven GO Strikers 2013, a spin-off game for the Nintendo Wii that features characters from the original DS games and the main characters of Inazuma Eleven GO and Inazuma Eleven GO Chrono Stone. But what if you don't have a Wii console or you live in a region where the game is not available? Don't worry, there is a solution for you. You can download and play Inazuma Eleven GO Strikers 2013 mod apk on your Android device. In this article, we will tell you what is Inazuma Eleven GO Strikers 2013, what is a mod apk, how to download and install it on your device, and what are the benefits of playing it. Read on to find out more.

    -

    inazuma eleven go strikers 2013 download mod apk


    DOWNLOAD ··· https://gohhs.com/2uPsgS



    -

    What is Inazuma Eleven GO Strikers 2013?

    -

    Inazuma Eleven GO Strikers 2013 is a soccer game that was released in Japan in December 2012 for the Nintendo Wii. It is a spin-off game that features characters from the original DS games, as well as the main characters of Inazuma Eleven GO and Inazuma Eleven GO Chrono Stone, two anime series that are part of the franchise. The game is a fast-paced soccer game that

    allows you to use spectacular super techniques that can turn the tide of the match. You can choose from a variety of modes, such as story mode, exhibition mode, tournament mode, and battle mode. In story mode, you can relive the events of the Inazuma Eleven GO and Inazuma Eleven GO Chrono Stone anime series, and face off against powerful teams and enemies. In exhibition mode, you can customize your own team and play against the CPU or another player. In tournament mode, you can compete in different cups and leagues and aim for the championship. In battle mode, you can play online with other players from around the world, even in a 4 vs 4 match.

    -

    What is a mod apk?

    -

              A mod apk is a modified version of an Android application that allows users to access features that are not available in the original version. For example, a mod apk can bypass the region lock and let you play games that are not officially released in your country. A mod apk can also give you unlimited resources, unlocked characters, and enhanced graphics in games. A mod apk is usually created by third-party developers who modify the original apk file and add or remove some code. However, not all mod apks are safe to use, as some may contain viruses or malware that can harm your device or steal your personal information. Therefore, you should always download mod apks from reliable sources and scan them with antivirus software before installing them.
          

    -

    How to download and install Inazuma Eleven GO Strikers 2013 mod apk on your Android device?

    -

    If you want to play Inazuma Eleven GO Strikers 2013 on your Android device, you will need to download and install the mod apk file. Here are the steps to do so:

    -
      -
    1. Find a reliable source that offers the Inazuma Eleven GO Strikers 2013 mod apk file and download it to your device. You can search online for websites or forums that provide the link to the file. Make sure to check the reviews and ratings of the source before downloading anything.
    2. -
    3. Enable the installation of unknown sources in your device settings. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install applications that are not from the Google Play Store.
    4. -
    5. Locate the Inazuma Eleven GO Strikers 2013 mod apk file in your file manager and tap on it to install it. Follow the instructions on the screen and wait for the installation to complete.
    6. -
    7. Launch the game and enjoy playing it on your Android device. You may need to grant some permissions to the game for it to run properly.
    8. -
    -

    What are the benefits of playing Inazuma Eleven GO Strikers 2013 mod apk?

    -

    Playing Inazuma Eleven GO Strikers 2013 mod apk has many benefits compared to playing the original version of the game. Here are some of them:

    -
      -
    • You can play online with other players from around the world, even in a 4 vs 4 battle mode. This way, you can challenge your friends or make new ones while enjoying the game.
    • -
    • You can unlock all the playable characters, teams, and modes in the game without spending any money or time. You can choose from over 200 characters and over 40 teams, each with their own unique skills and abilities. You can also access all the modes, such as story mode, exhibition mode, tournament mode, and battle mode.
    • -
    • You can experience the game in high-quality graphics and sound effects that enhance the gameplay. The mod apk improves the resolution, framerate, and textures of the game, making it look more realistic and immersive. The sound effects are also more clear and crisp, adding to the excitement of the game.
    • -
    -

    Conclusion

    -

    Inazuma Eleven GO Strikers 2013 is a fun and exciting soccer game that fans of the franchise will love. It is a spin-off game that features characters from the original DS games and the main characters of Inazuma Eleven GO and Inazuma Eleven GO Chrono Stone anime series. It is a fast-paced soccer game that allows you to use spectacular super techniques and compete in various modes. With the mod apk, you can play the game on your Android device without any restrictions or limitations. You can play online with other players, unlock all the characters and teams, and enjoy high-quality graphics and sound effects. Download Inazuma Eleven GO Strikers 2013 mod apk today and join the ultimate soccer battle with your favorite characters.

    -

    FAQs

    -

    Is Inazuma Eleven GO Strikers 2013 mod apk safe to use?

    -

    In general, yes, as long as you download it from a reliable source and scan it with antivirus software before installing it. However, some mod apks may contain viruses or malware that can harm your device or steal your personal information. Therefore, you should always be careful and cautious when downloading and installing mod apks. You should also avoid using mod apks for games that involve real money transactions or personal data, as you may risk losing them or compromising them.

    -

    How much space does Inazuma Eleven GO Strikers 2013 mod apk require on my device?

    -

    The size of the Inazuma Eleven GO Strikers 2013 mod apk file may vary depending on the source and the version of the mod. However, the average size of the file is around 1.5 GB, so you will need to have enough free space on your device to download and install it. You may also need to have some extra space for the game data and cache files that will be generated when you play the game.

    -

    inazuma eleven go strikers 2013 apk mod free download
    -download inazuma eleven go strikers 2013 mod apk for android
    -inazuma eleven go strikers 2013 mod apk unlimited money
    -how to download inazuma eleven go strikers 2013 mod apk
    -inazuma eleven go strikers 2013 mod apk latest version
    -inazuma eleven go strikers 2013 mod apk offline
    -download inazuma eleven go strikers 2013 mod apk english version
    -inazuma eleven go strikers 2013 mod apk no root
    -inazuma eleven go strikers 2013 mod apk obb
    -inazuma eleven go strikers 2013 mod apk data
    -inazuma eleven go strikers 2013 mod apk android 1
    -inazuma eleven go strikers 2013 mod apk revdl
    -inazuma eleven go strikers 2013 mod apk rexdl
    -inazuma eleven go strikers 2013 mod apk happymod
    -inazuma eleven go strikers 2013 mod apk pure
    -inazuma eleven go strikers 2013 mod apk full version
    -inazuma eleven go strikers 2013 mod apk mega
    -inazuma eleven go strikers 2013 mod apk mediafire
    -inazuma eleven go strikers 2013 mod apk google drive
    -inazuma eleven go strikers 2013 mod apk zippyshare
    -inazuma eleven go strikers 2013 mod apk highly compressed
    -inazuma eleven go strikers 2013 mod apk low mb
    -inazuma eleven go strikers 2013 mod apk ppsspp
    -inazuma eleven go strikers 2013 mod apk psp
    -inazuma eleven go strikers 2013 mod apk emulator
    -inazuma eleven go strikers 2013 mod apk cheat codes
    -inazuma eleven go strikers 2013 mod apk hack tool
    -inazuma eleven go strikers 2013 mod apk unlimited everything
    -inazuma eleven go strikers 2013 mod apk all characters unlocked
    -inazuma eleven go strikers 2013 mod apk new update
    -inazuma eleven go strikers 2013 mod apk online multiplayer
    -inazuma eleven go strikers 2013 mod apk best team
    -inazuma eleven go strikers 2013 mod apk gameplay
    -inazuma eleven go strikers 2013 mod apk review
    -inazuma eleven go strikers 2013 mod apk tips and tricks
    -inazuma eleven go strikers 2013 mod apk walkthrough
    -download game inazuma eleven go strikers 2013 mod apk android
    -cara download game inazuma eleven go strikers 2013 mod apk android
    -game mirip inazuma eleven go strikers 2013 mod apk android
    -game offline seperti inazuma eleven go strikers 2013 mod apk android

    -

    Can I play Inazuma Eleven GO Strikers 2013 mod apk offline?

    -

    Yes, you can play Inazuma Eleven GO Strikers 2013 mod apk offline, as long as you have already installed the game and launched it at least once while connected to the internet. This is because the game needs to verify your device and account information before letting you play. Once you have done that, you can play the game offline without any problems. However, some features of the game, such as online battle mode, may not be available when you are offline.

    -

    What are some of the best teams and characters to use in Inazuma Eleven GO Strikers 2013 mod apk?

    -

    This is a matter of personal preference and strategy, as different teams and characters have different strengths and weaknesses. However, some of the most popular and powerful teams and characters in the game are:

    -
      -
    • Raimon: The main team of the Inazuma Eleven GO series, led by Matsukaze Tenma. They have a balanced team with strong attackers, defenders, and goalkeepers. Some of their best players are Tsurugi Kyousuke, Shindou Takuto, Nishizono Shinsuke, Kirino Ranmaru, and Shinsuke.
    • -
    • Chrono Storm: The ultimate team of the Inazuma Eleven GO Chrono Stone series, composed of players from different eras and timelines. They have a diverse team with unique skills and abilities. Some of their best players are Fei Rune, Hakuryuu, Zanark Avalonic, Kinako Nanobana, and Wonderbot.
    • -
    • Inazuma Legend Japan: The legendary team of the original Inazuma Eleven series, composed of players from different schools and regions. They have an experienced team with powerful techniques and teamwork. Some of their best players are Endou Mamoru, Gouenji Shuuya, Kidou Yuuto, Fubuki Shirou, and Kazemaru Ichirouta.
    • -
    -

    How can I update Inazuma Eleven GO Strikers 2013 mod apk to the latest version?

    -

    To update Inazuma Eleven GO Strikers 2013 mod apk to the latest version, you will need to download and install the new version of the mod apk file from the same source that you used before. You may also need to uninstall the previous version of the game before installing the new one. However, you should always back up your game data before updating or uninstalling the game, as you may lose your progress or settings otherwise.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Roblox Kiwi V2 and Unlock All Features.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Roblox Kiwi V2 and Unlock All Features.md deleted file mode 100644 index d175f1788dd24ca848c23c0c4cd02b7864b1a901..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Roblox Kiwi V2 and Unlock All Features.md +++ /dev/null @@ -1,96 +0,0 @@ - -

    Kiwi V2 Download Roblox: How to Install and Use the New Overpowered Executor

    -

    Roblox is a popular online gaming platform that allows users to create and play millions of games. However, some players may want to have more fun and control over their games, such as using cheats, hacks, mods, or scripts. That's where Kiwi V2 comes in. In this article, we will show you what Kiwi V2 is, why you need it, how to download and install it, and how to use it for Roblox games.

    -

    kiwi v2 download roblox


    Download Ziphttps://gohhs.com/2uPtFc



    -

    What is Kiwi V2 and why do you need it?

    -

    Kiwi V2 is a powerful script executor for Roblox games

    -

    A script executor is a tool that allows you to run custom code or scripts on Roblox games. Scripts are pieces of code that can modify or enhance your gameplay experience, such as giving you unlimited money, health, speed, items, or abilities. Scripts can also bypass anti-cheat systems or game rules, allowing you to do things that are normally impossible or forbidden.

    -

    Kiwi V2 allows you to run any script you want on any game you want

    -

    Unlike some other script executors, Kiwi V2 does not limit you to specific games or scripts. You can use Kiwi V2 to run any script you want on any game you want, as long as the script is compatible with the game. This means that you can enjoy a variety of games with different scripts, depending on your mood and preference.

    -

    Kiwi V2 has many features and advantages over other executors

    -

    Kiwi V2 is not just a simple script executor. It has many features and advantages that make it stand out from other executors. Some of these features are:

    -
      -
    • Kiwi V2 has a user-friendly interface that is easy to navigate and use.
    • -
    • Kiwi V2 has a built-in script hub that contains hundreds of scripts for various games.
    • -
    • Kiwi V2 has a fast injection speed that ensures smooth and stable performance.
    • -
    • Kiwi V2 has a high compatibility rate that supports most games and scripts.
    • -
    • Kiwi V2 has an auto-update system that keeps it up-to-date with the latest patches and fixes.
    • -
    • Kiwi V2 has a stealth mode that hides it from detection by anti-cheat systems or game moderators.
    • -
    -

    How to download and install Kiwi V2 for Roblox?

    -

    Download Kiwi V2 from the official website or a trusted source

    -

    The first step to use Kiwi V2 is to download it from the official website or a trusted source. You can find the official website by searching for "Kiwi V2 download" on Google or Bing. Alternatively, you can use this link: [Kiwi V2 Download]. Make sure that you download the latest version of Kiwi V2, which is currently 2.0.5. Do not download Kiwi V2 from untrusted sources, as they may contain viruses or malware that can harm your computer or account.

    -

    Extract the zip file and run the setup.exe file

    -

    After you download Kiwi V2, you will get a zip file that contains the setup files. You need to extract the zip file to a folder on your computer. You can use any program that can extract zip files, such as WinRAR or 7-Zip. Once you extract the zip file, you will see a file called setup.exe. This is the installer for Kiwi V2. Double-click on the setup.exe file to run it.
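              If you prefer extracting the archive with a small script instead of WinRAR or 7-Zip, Python's standard library can do the same job. This is just an illustrative sketch; the archive name below is an assumption, and the only thing that matters is that setup.exe ends up in the folder you extract to.
          
              ```python
              import zipfile
              from pathlib import Path
          
              ARCHIVE = Path("KiwiV2.zip")  # assumed name of the downloaded zip file
              TARGET = Path("KiwiV2")       # folder to extract the setup files into
          
              TARGET.mkdir(exist_ok=True)
              with zipfile.ZipFile(ARCHIVE) as zf:
                  # Extract everything, including setup.exe, into the target folder.
                  zf.extractall(TARGET)
          
              print("Extracted files:", [p.name for p in TARGET.iterdir()])
              ```
          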

    -

    Follow the instructions and agree to the terms and conditions

    -

    The setup.exe file will launch a wizard that will guide you through the installation process. You need to follow the instructions and agree to the terms and conditions of Kiwi V2. The installation process is simple and quick, and it will create a shortcut for Kiwi V2 on your desktop. You can also choose where to install Kiwi V2 on your computer, but we recommend that you use the default location.

    -

    Launch Kiwi V2 and select the game you want to exploit

    -

    After you install Kiwi V2, you can launch it by clicking on the shortcut on your desktop or by finding it in your start menu. When you launch Kiwi V2, you will see a window that shows a list of games that you can exploit with Kiwi V2. You can scroll through the list and select the game that you want to exploit. Alternatively, you can use the search bar to find your game by typing its name.

    -

    How to use Kiwi V2 for Roblox?

    -

    Find a script that works with your game and copy it to your clipboard

    -

    The next step to use Kiwi V2 is to find a script that works with your game and copy it to your clipboard. A script is a piece of code that can modify or enhance your gameplay experience, such as giving you unlimited money, health, speed, items, or abilities. Scripts can also bypass anti-cheat systems or game rules, allowing you to do things that are normally impossible or forbidden.

    -

    kiwi v2 exploit download for roblox
    -how to use kiwi v2 roblox script executor
    -kiwi v2 roblox hack free download
    -kiwi v2 download virus and threat protection
    -kiwi v2 roblox exploit tutorial
    -kiwi v2 download no key system
    -kiwi v2 roblox script hub
    -kiwi v2 download latest version
    -kiwi v2 roblox exploit commands
    -kiwi v2 download safe and secure
    -kiwi v2 roblox jailbreak script
    -kiwi v2 download windows 10
    -kiwi v2 roblox admin panel
    -kiwi v2 download without ads
    -kiwi v2 roblox aimbot script
    -kiwi v2 download for mac
    -kiwi v2 roblox gui maker
    -kiwi v2 download discord server
    -kiwi v2 roblox fly hack
    -kiwi v2 download 2023 update
    -kiwi v2 roblox game guardian
    -kiwi v2 download zip file
    -kiwi v2 roblox injector download
    -kiwi v2 download mediafire link
    -kiwi v2 roblox mod menu
    -kiwi v2 download mega.nz
    -kiwi v2 roblox noclip script
    -kiwi v2 download password
    -kiwi v2 roblox phantom forces script
    -kiwi v2 download reddit review
    -kiwi v2 roblox speed hack
    -kiwi v2 download unblocked games 66
    -kiwi v2 roblox teleport script
    -kiwi v2 download youtube video
    -kiwi v2 roblox unlimited money script

    -

    You can find scripts for various games on websites such as [Roblox Scripts] or [V3rmillion]. You can also use the built-in script hub of Kiwi V2, which contains hundreds of scripts for various games. To access the script hub, click on the "Script Hub" tab on Kiwi V2 and browse through the categories of scripts. You can also use the search bar to find a script by typing its name.

    -

    Once you find a script that works with your game, copy it to your clipboard by selecting it and pressing Ctrl+C on your keyboard. Make sure that you copy the entire script and not just a part of it.

    -

    Open Kiwi V2 and click on the "Execute" tab

    -

    The next step to use Kiwi V2 is to open it and click on the "Execute" tab. This is where you can paste and run your script on your game. To open Kiwi V2, click on the shortcut on your desktop or find it in your start menu. When you open Kiwi V2, make sure that you have selected the game that you want to exploit from the list of games.

    -

    Once you open Kiwi V2, click on the "Execute" tab at the top of the window. This will open a text box where you can paste and run your script.

    -

    Paste the script in the text box and click on the "Execute" button

    -

    The final step to use Kiwi V2 is to paste the script in the text box and click on the "Execute" button. To paste the script in the text box, press Ctrl+V on your keyboard or right-click and select "Paste". You should see the script appear in the text box.

    -

    To run the script on your game, click on the "Execute" button at the bottom of the window. This will inject the script into your game and activate its effects. You should see a message saying "Script executed successfully" if everything goes well.

    -

    Enjoy the game with your new abilities and features

    -

              Congratulations! You have successfully used Kiwi V2 to run a script on your Roblox game. You can now enjoy the game with your new abilities and features, such as unlimited money, health, speed, items, or abilities. You can also bypass anti-cheat systems or game rules, allowing you to do things that are normally impossible or forbidden.
          

    -

    However, you should always use Kiwi V2 with caution and respect. Do not abuse the scripts or ruin the game for other players. Do not brag or show off your scripts to other players or game moderators. Do not use scripts that are harmful or malicious to your computer or account. Do not use scripts that are outdated or incompatible with your game. Always check the source and reputation of the scripts before using them.

    -

    Conclusion

    -

    Kiwi V2 is a great tool for Roblox players who want to have more fun and control over their games. Kiwi V2 is easy to download, install, and use, and has many features and advantages over other executors. Kiwi V2 is safe and undetected, but you should always use it with caution and respect.

    -

    If you want to download Kiwi V2 and try it for yourself, you can use this link: [Kiwi V2 Download]. If you want to find more scripts for various games, you can use these links: [Roblox Scripts] or [V3rmillion]. If you want to learn more about Kiwi V2 and its features, you can visit the official website or join the Discord server.

    -

    We hope that this article has helped you understand what Kiwi V2 is, why you need it, how to download and install it, and how to use it for Roblox games. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading and happy gaming!

    -

    FAQs

    -
      -
    • Q: Is Kiwi V2 free?
    • -
    • A: Yes, Kiwi V2 is free to download and use. However, you may need to complete a short survey or offer before downloading it.
    • -
    • Q: Is Kiwi V2 safe?
    • -
    • A: Yes, Kiwi V2 is safe and undetected by anti-cheat systems or game moderators. However, you should always use it with caution and respect.
    • -
    • Q: Is Kiwi V2 compatible with all games and scripts?
    • -
    • A: No, Kiwi V2 is not compatible with all games and scripts. Some games or scripts may not work with Kiwi V2 or may cause errors or crashes. You should always check the compatibility and reputation of the games and scripts before using them.
    • -
    • Q: How do I update Kiwi V2?
    • -
    • A: Kiwi V2 has an auto-update system that keeps it up-to-date with the latest patches and fixes. You do not need to manually update Kiwi V2.
    • -
    • Q: How do I uninstall Kiwi V2?
    • -
    • A: To uninstall Kiwi V2, you can use the uninstaller that comes with the setup files. You can also delete the folder where you installed Kiwi V2 on your computer.
    • -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py b/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py deleted file mode 100644 index 76e4b272b479a26c63d120c818c140870cd8c287..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .backbone import build_backbone diff --git a/spaces/fgibarra/fraud-prevention/app.py b/spaces/fgibarra/fraud-prevention/app.py deleted file mode 100644 index 973b70be68b00d3c4e7b3a5a94c1773acf48d567..0000000000000000000000000000000000000000 --- a/spaces/fgibarra/fraud-prevention/app.py +++ /dev/null @@ -1,190 +0,0 @@ -import gradio as gr -import json -import pandas as pd -import pickle -import logging -import os - - -MAIN_FOLDER = os.path.dirname(os.path.curdir) -MODEL_PATH = os.path.join(MAIN_FOLDER, "model/modelo_proyecto_final.pkl") -COLUMNS_PATH = os.path.join(MAIN_FOLDER, "model/categories_ohe_without_fraudulent.pickle") -BINS_ORDER = os.path.join(MAIN_FOLDER, "model/saved_bins_order.pickle") -BINS_TRANSACTION = os.path.join(MAIN_FOLDER, "model/saved_bins_transaction.pickle") - -logging.info(f"MAIN_FOLDER: {MAIN_FOLDER}") - -with open(MODEL_PATH, "rb") as f: - model = pickle.load(f) - -with open(COLUMNS_PATH, 'rb') as handle: - ohe_tr = pickle.load(handle) - -with open(BINS_ORDER, 'rb') as handle: - new_saved_bins_order = pickle.load(handle) - -with open(BINS_TRANSACTION, 'rb') as handle: - new_saved_bins_transaction = pickle.load(handle) - - -def read_json_file(file_path): - with open(file_path, 'r') as f: - return json.load(f) - - -def get_payment_method_issuer(): - return read_json_file('jsons/payment_method_issuer.json') - - -def get_payment_method_provider(): - return read_json_file('jsons/payment_method_provider.json') - - -# Define params names -PARAMS_NAME = [ - "orderAmount", - "orderState", - "paymentMethodRegistrationFailure", - "paymentMethodType", - "paymentMethodProvider", - "paymentMethodIssuer", - "transactionAmount", - "transactionFailed", - "emailDomain", - "emailProvider", - "customerIPAddressVersion", - "sameCity" -] - - -def predict(*args): - answer_dict = {} - - for i in range(len(PARAMS_NAME)): - answer_dict[PARAMS_NAME[i]] = [args[i]] - - single_instance = pd.DataFrame.from_dict(answer_dict) - - # Manejar puntos de corte o bins - single_instance["orderAmount"] = single_instance["orderAmount"].astype(float) - single_instance["orderAmount"] = pd.cut(single_instance['orderAmount'], - bins=new_saved_bins_order, - include_lowest=True) - - single_instance["transactionAmount"] = single_instance["transactionAmount"].astype(int) - single_instance["transactionAmount"] = pd.cut(single_instance['transactionAmount'], - bins=new_saved_bins_order, - include_lowest=True) - - # One hot encoding - single_instance_ohe = pd.get_dummies(single_instance).reindex(columns=ohe_tr).fillna(0) - - prediction = model.predict(single_instance_ohe) - type_of_fraud = int(prediction[0]) - - # Adaptación respuesta - response = "Error parsing value" - if type_of_fraud == 0: - response = "False" - if type_of_fraud == 1: - response = "True" - if type_of_fraud == 2: - response = "Warning" - - return response - - -with gr.Blocks() as demo: - gr.Markdown( - """ - # Prevención de Fraude 🙂😠 - """ - ) - - with gr.Row(): - with gr.Column(): - gr.Markdown( - """ - ## Predecir si un 
cliente es fraudulento o no - """ - ) - - order_amount = gr.Slider(label="Order Amount", minimum=0, maximum=500, step=1, randomize=True) - - order_state = gr.Radio(label="Order State", choices=["failed", "fulfilled", "pending"], value="fulfilled") - - payment_method_registration_failure = gr.Radio(label="Payment Method Registration Failure", - choices=["True", "False"], value="False") - - payment_method_type = gr.Radio(label="Payment Method Type", - choices=["card", "apple pay", "paypal", "bitcoin"], value="card") - - payment_method_provider = gr.Dropdown(label="Payment Method Provider", - choices=get_payment_method_provider(), - multiselect=False, value="JCB 16 digit") - - payment_method_issuer = gr.Dropdown(label="Payment Method Issuer", - choices=get_payment_method_issuer(), - multiselect=False, - value="Citizens First Banks") - - transaction_amount = gr.Slider(label="Transaction Amount", minimum=0, maximum=500, step=1, randomize=True) - - transaction_failed = gr.Radio(label="Transaction Failed", choices=["True", "False"], value="False") - - email_domain = gr.Radio(label="Email Domain", - choices=["biz", "com", "info", "net", "org", "weird"], - value="com") - - email_provider = gr.Radio(label="Email Provider", - choices=["gmail", "hotmail", "yahoo", "weird", "other"], - value="gmail") - - customer_ip_address_version = gr.Radio(label="Customer IP Address Version", - choices=["4.0", "6.0"], - value="4.0") - - same_city = gr.Radio(label="Same City", - choices=["no", "yes", "unknown"], - value="yes") - - with gr.Column(): - gr.Markdown( - """ - ## Predicción - """ - ) - - label = gr.Label(label="Tipo Fraude") - predict_btn = gr.Button(value="Evaluar") - predict_btn.click( - predict, - inputs=[ - order_amount, - order_state, - payment_method_registration_failure, - payment_method_type, - payment_method_provider, - payment_method_issuer, - transaction_amount, - transaction_failed, - email_domain, - email_provider, - customer_ip_address_version, - same_city - ], - outputs=[label], - api_name="prediccion" - ) - gr.Markdown( - """ -

    - Proyecto demo creado en el bootcamp de EDVAI 🤗 - -

    - """ - ) - -demo.launch() - diff --git a/spaces/firzaelbuho/rvc-models/infer_pack/attentions.py b/spaces/firzaelbuho/rvc-models/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/firzaelbuho/rvc-models/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder 
output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." 
- block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/florim/MedGPT/autogpt/commands/write_tests.py b/spaces/florim/MedGPT/autogpt/commands/write_tests.py deleted file mode 100644 index 35a086536c9d05d520a84b15ead49f775eacdcc9..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/autogpt/commands/write_tests.py +++ /dev/null @@ -1,31 +0,0 @@ -"""A module that contains a function to generate test cases for the submitted code.""" -from __future__ import annotations - -import json - -from autogpt.llm_utils import call_ai_function - - -def write_tests(code: str, focus: list[str]) -> str: - """ - A function that takes in code and focus topics and returns a response from create - chat completion api call. - - Parameters: - focus (list): A list of suggestions around what needs to be improved. - code (str): Code for test cases to be generated against. - Returns: - A result string from create chat completion. Test cases for the submitted code - in response. - """ - - function_string = ( - "def create_test_cases(code: str, focus: Optional[str] = None) -> str:" - ) - args = [code, json.dumps(focus)] - description_string = ( - "Generates test cases for the existing code, focusing on" - " specific areas if required." 
- ) - - return call_ai_function(function_string, args, description_string) diff --git a/spaces/florim/MedGPT/tests/integration/weaviate_memory_tests.py b/spaces/florim/MedGPT/tests/integration/weaviate_memory_tests.py deleted file mode 100644 index 015eab05484f485aeb8ee035e92ad7811e9dddd4..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/tests/integration/weaviate_memory_tests.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -import sys -import unittest -from unittest import mock -from uuid import uuid4 - -from weaviate import Client -from weaviate.util import get_valid_uuid - -from autogpt.config import Config -from autogpt.memory.base import get_ada_embedding -from autogpt.memory.weaviate import WeaviateMemory - - -class TestWeaviateMemory(unittest.TestCase): - cfg = None - client = None - index = None - - @classmethod - def setUpClass(cls): - # only create the connection to weaviate once - cls.cfg = Config() - - if cls.cfg.use_weaviate_embedded: - from weaviate.embedded import EmbeddedOptions - - cls.client = Client( - embedded_options=EmbeddedOptions( - hostname=cls.cfg.weaviate_host, - port=int(cls.cfg.weaviate_port), - persistence_data_path=cls.cfg.weaviate_embedded_path, - ) - ) - else: - cls.client = Client( - f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}" - ) - - cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index) - - """ - In order to run these tests you will need a local instance of - Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose - for creating local instances using docker. - Alternatively in your .env file set the following environmental variables to run Weaviate embedded (see: https://weaviate.io/developers/weaviate/installation/embedded): - - USE_WEAVIATE_EMBEDDED=True - WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate" - """ - - def setUp(self): - try: - self.client.schema.delete_class(self.index) - except: - pass - - self.memory = WeaviateMemory(self.cfg) - - def test_add(self): - doc = "You are a Titan name Thanos and you are looking for the Infinity Stones" - self.memory.add(doc) - result = self.client.query.get(self.index, ["raw_text"]).do() - actual = result["data"]["Get"][self.index] - - self.assertEqual(len(actual), 1) - self.assertEqual(actual[0]["raw_text"], doc) - - def test_get(self): - doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos" - - with self.client.batch as batch: - batch.add_data_object( - uuid=get_valid_uuid(uuid4()), - data_object={"raw_text": doc}, - class_name=self.index, - vector=get_ada_embedding(doc), - ) - - batch.flush() - - actual = self.memory.get(doc) - - self.assertEqual(len(actual), 1) - self.assertEqual(actual[0], doc) - - def test_get_stats(self): - docs = [ - "You are now about to count the number of docs in this index", - "And then you about to find out if you can count correctly", - ] - - [self.memory.add(doc) for doc in docs] - - stats = self.memory.get_stats() - - self.assertTrue(stats) - self.assertTrue("count" in stats) - self.assertEqual(stats["count"], 2) - - def test_clear(self): - docs = [ - "Shame this is the last test for this class", - "Testing is fun when someone else is doing it", - ] - - [self.memory.add(doc) for doc in docs] - - self.assertEqual(self.memory.get_stats()["count"], 2) - - self.memory.clear() - - self.assertEqual(self.memory.get_stats()["count"], 0) - - -if __name__ == "__main__": - unittest.main() diff --git 
a/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/store/actions.js b/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/store/actions.js deleted file mode 100644 index 5322ec1115aa156b3d0eb076b6e0b58d817dc0ce..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/Interactive_DeepRL_Demo/js/ui_state/store/actions.js +++ /dev/null @@ -1,398 +0,0 @@ -// All available actions -export default { - - /** - * Adds the given environment to the specified set. - * @param context {Store} - * @param payload {{set: string, env: Object}} - */ - addEnv(context, payload){ - context.commit('addEnv', payload); - }, - - /** - * Removes the environment of the given index from the custom set. - * @param context {Store} - * @param payload {number} - Index of the environment to remove - */ - deleteEnv(context, payload){ - context.commit('deleteEnv', payload); - }, - - /** - * Loads the given environment into the simulation. - * @param context {Store} - * @param payload {Object} - Environment to load - */ - loadEnv(context, payload){ - - // Resets drawing mode - drawing_canvas.clear(); - window.terrain = { - ground: [], - ceiling: [] - }; - - window.ground = [...payload.terrain.ground]; - window.ceiling = [...payload.terrain.ceiling]; - - // Updates the values of the terrain sliders - for(let param in payload.terrain.parkourConfig){ - context.commit('updateParkourConfig', {name: param, value: payload.terrain.parkourConfig[param]}); - } - for(let param in payload.terrain.creepersConfig){ - context.commit('updateParkourConfig', {name: param, value: payload.terrain.creepersConfig[param]}); - } - - // Replaces previous agents by the ones of the env - while (context.state.agents.length > 0){ - context.commit('deleteAgent', {index: 0}); - } - for(let agent of payload.agents){ - context.commit('addAgent', { - morphology: agent.morphology, - name: agent.name, - path: agent.path, - init_pos: agent.init_pos - }); - } - - // Initializes the new environment - context.commit('init_default', {}); - }, - - /** - * Initializes the game with default parameters. - * @param context {Store} - * @param payload - */ - init_default(context, payload) { - context.commit('init_default', {}); - }, - - /** - * Changes the value of the given terrain parameter. 
- * @param context {Store} - * @param payload {{name: string, value: number}} - Name and value of the terrain parameter to change - */ - changeParkourConfig(context, payload){ - - // Case one of the cppn dims is changed : aligns the terrain with the startpad - if(['dim1', 'dim2', 'dim3'].indexOf(payload.name) != -1){ - - window.ground = []; - window.ceiling = []; - window.align_terrain = { - align: true, - ceiling_offset: null, // aligns the ceiling with the startpad - ground_offset: null, // aligns the ground with the startpad - smoothing: window.game.env.TERRAIN_CPPN_SCALE // previous smoothing - }; - } - // Case smoothing, water_level or creepers is changed - else{ - window.align_terrain = { - align: true, - ceiling_offset: window.align_terrain.ceiling_offset, // keeps the same - ground_offset: window.align_terrain.ground_offset, // keeps the same - smoothing: window.game.env.TERRAIN_CPPN_SCALE // previous smoothing - }; - } - - // Updates the parameter - context.commit('updateParkourConfig', payload); - - // Generates the terrain if drawing mode is active - if(context.state.drawingModeState.drawing){ - context.commit('generateTerrain', true); - } - - // Resets drawing mode - drawing_canvas.clear(); - window.terrain = { - ground: [], - ceiling: [] - }; - - // Resets the simulation and deselects agent - context.commit('resetSimulation', {keepPositions: true}); - context.commit('selectAgent', -1); - }, - - /** - * Changes the value of the given switch parameter. - * @param context {Store} - * @param payload {{name: string, value: number}} - Name and value of the switch to toggle - */ - toggleSwitch(context, payload) { - switch (payload.name) { - case 'drawJoints': - context.commit('drawJoints', payload.value); - break; - case 'drawLidars': - context.commit('drawLidars', payload.value); - break; - case 'drawNames': - context.commit('drawNames', payload.value); - break; - case 'drawObservation': - context.commit('drawObservation', payload.value); - break; - case 'drawReward': - context.commit('drawReward', payload.value); - break; - } - }, - - /** - * Changes the status of the simulation. - * @param context {Store} - * @param payload {} - */ - toggleRun(context, payload) { - const status = context.state.simulationState.status; - switch (status) { - case 'init': - context.commit('startSimulation', {}); - break; - case 'running': - context.commit('pauseSimulation', {}); - break; - case 'paused': - context.commit('startSimulation', {}); - break; - } - }, - - /** - * Resets the simulation. - * @param context {Store} - * @param payload {} - */ - resetSimulation(context, payload) { - // Updates the terrain alignment - window.align_terrain = { - align: true, // aligns the terrain with the startpad - ceiling_offset: window.ceiling.length > 0 ? window.game.env.ceiling_offset - window.ceiling[0].y : null, - ground_offset: window.ground.length > 0 ? window.ground[0].y : null, // first ground y value - smoothing: window.game.env.TERRAIN_CPPN_SCALE // smoothing of the current terrain - }; - - context.commit('selectAgent', {index: -1}); - context.commit('resetSimulation', {keepPositions: false}); - }, - - /** - * Adds the given agent to the simulation. 
- * @param context {Store} - * @param payload {{morphology: string, name: string, path: string, init_pos: {x: number, y: number}}} - */ - addAgent(context, payload) { - // Pauses the simulation if it is running - if (context.state.simulationState.status == 'running') { - context.commit('pauseSimulation', {}); - } - context.commit('addAgent', payload); - }, - - /** - * Deletes the agent of the given index from the simulation. - * @param context {Store} - * @param payload {{index: number}} - */ - deleteAgent(context, payload) { - context.commit('deleteAgent', payload); - }, - - /** - * Sets the initial position of the agent of the given index. - * @param context {Store} - * @param payload {{index: number, init_pos: {x: number, y: number}}} - */ - setAgentInitPos(context, payload){ - context.commit('setAgentInitPos', payload); - }, - - /** - * Selects the agent of the given index. - * @param context {Store} - * @param payload {{index: number}} - */ - selectAgent(context, payload){ - context.commit('selectAgent', payload); - }, - - /** - * Follows or not the agent of the given index according to the given boolean. - * @param context {Store} - * @param payload {{index: number, value: boolean}} - */ - followAgent(context, payload) { - context.commit('followAgent', payload); - }, - - /** - * Renames the agent of the given index with the given string value. - * @param context {Store} - * @param payload {{index: number, value: string}} - */ - renameAgent(context, payload){ - context.commit('renameAgent', payload); - }, - - /** - * Selects the seed option of the given index for the given morphology. - * @param context {Store} - * @param payload {{morphology: string, index: number}} - */ - selectSeedIdx(context, payload) { - context.commit('selectSeedIdx', payload); - }, - - /** - * Adds the given morphology with the given policy seeds to the list of morphologies. - * @param context {Store} - * @param payload {{morphology: string, seeds: []}} - */ - addMorphology(context, payload) { - context.commit('addMorphology', payload); - // Selects the first seed option - context.commit('selectSeedIdx', {morphology: payload.morphology, index: 0}); - }, - - /** - * Changes the active tab. - * @param context {Store} - * @param payload {string} - Name of the tab to activate - */ - switchTab(context, payload) { - - // Switch from 'Draw Yourself!' to another tab - if(context.state.activeTab == 'draw_yourself'){ - if(payload != 'draw_yourself' && context.state.drawingModeState.drawing) { - // Generates the terrain from the drawing - context.commit('generateTerrain', true); - } - } - // Switch to 'Draw Yourself!' from another tab - else if(payload == 'draw_yourself'){ - // Generates the drawing from the terrain - context.commit('generateTerrain', false); - } - context.commit('switchTab', payload); - }, - - /** - * Activates or deactivates the ground drawing mode. - * @param context {Store} - * @param payload {boolean} - */ - drawGround(context, payload) { - context.commit('drawGround', payload); - }, - - /** - * Activates or deactivates the ceiling drawing mode. - * @param context {Store} - * @param payload {boolean} - */ - drawCeiling(context, payload) { - context.commit('drawCeiling', payload); - }, - - /** - * Activates or deactivates the erasing mode. - * @param context {Store} - * @param payload {boolean} - */ - erase(context, payload) { - context.commit('erase', payload); - }, - - /** - * Activates or deactivates the drawing mode of the given asset. 
- * @param context {Store} - * @param payload {{name: string, value: boolean}} - */ - drawAsset(context, payload){ - // Only supports circle asset for now - switch (payload.name){ - case 'circle': - context.commit('drawCircle', payload.value); - break; - } - }, - - /** - * Handles clicks outside the canvas when drawing. - * @param context {Store} - * @param payload {} - */ - deselectDrawingButtons(context, payload){ - context.commit('deselectDrawingButtons', payload); - }, - - /** - * Resets drawing mode. - * @param context {Store} - * @param payload {} - */ - clear(context, payload) { - context.commit('clear', payload); - context.commit('generateTerrain', false); - }, - - /** - * Generates the terrain from the drawing (true) or vice-versa (false). - * @param context {Store} - * @param payload {boolean} - */ - generateTerrain(context, payload){ - context.commit('generateTerrain', payload); - }, - - refreshDrawing(context, payload){ - context.commit('refreshDrawing', payload); - }, - - /** - * Starts (true) or exits (false) intro tour. - * @param context {Store} - * @param payload {boolean} - */ - setIntroTour(context, payload){ - context.commit('setIntroTour', payload); - }, - - /** - * Sets the language. - * @param context {Store} - * @param payload {string} - */ - setLanguage(context, payload){ - context.commit('setLanguage', payload); - - // Resets the hints options of the intro - if(window.introTour != null){ - window.introTour.setOptions({ - hintButtonLabel: window.lang_dict[context.state.language]['introHints']['buttonLabel'], - hints: [ - { - hint: window.lang_dict[context.state.language]['introHints']['tips'], - element: document.querySelector('#canvas_container'), - hintPosition: 'top-right', - } - ] - }); - - // Removes the div element of the hints - let introDiv = document.getElementsByClassName("introjs-hints")[0]; - introDiv.parentNode.removeChild(introDiv); - - // Recreates the hints - window.introTour.addHints(); - } - - window.game.env.render(); - } -}; \ No newline at end of file diff --git a/spaces/glyszt/vt/vtoonify/model/raft/alt_cuda_corr/setup.py b/spaces/glyszt/vt/vtoonify/model/raft/alt_cuda_corr/setup.py deleted file mode 100644 index c0207ff285ffac4c8146c79d154f12416dbef48c..0000000000000000000000000000000000000000 --- a/spaces/glyszt/vt/vtoonify/model/raft/alt_cuda_corr/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -from setuptools import setup -from torch.utils.cpp_extension import BuildExtension, CUDAExtension - - -setup( - name='correlation', - ext_modules=[ - CUDAExtension('alt_cuda_corr', - sources=['correlation.cpp', 'correlation_kernel.cu'], - extra_compile_args={'cxx': [], 'nvcc': ['-O3']}), - ], - cmdclass={ - 'build_ext': BuildExtension - }) - diff --git a/spaces/gordonchan/h2oo/gradio_utils/css.py b/spaces/gordonchan/h2oo/gradio_utils/css.py deleted file mode 100644 index 7db8bee879c89a28d36b2f7f5d9c1183e76c1b1c..0000000000000000000000000000000000000000 --- a/spaces/gordonchan/h2oo/gradio_utils/css.py +++ /dev/null @@ -1,60 +0,0 @@ -def get_css(kwargs) -> str: - if kwargs['h2ocolors']: - css_code = """footer {visibility: hidden;} - body{background:linear-gradient(#f5f5f5,#e5e5e5);} - body.dark{background:linear-gradient(#000000,#0d0d0d);} - """ - else: - css_code = """footer {visibility: hidden}""" - - css_code += make_css_base() - return css_code - - -def make_css_base() -> str: - css1 = """ - #col_container {margin-left: auto; margin-right: auto; text-align: left;} - """ - return css1 + """ - @import 
url('https://fonts.googleapis.com/css2?family=Source+Sans+Pro:wght@400;600&display=swap'); - - body.dark{#warning {background-color: #555555};} - - #small_btn { - margin: 0.6em 0em 0.55em 0; - max-width: 20em; - min-width: 5em !important; - height: 5em; - font-size: 14px !important; - } - - #prompt-form { - border: 1px solid var(--primary-500) !important; - } - - #prompt-form.block { - border-radius: var(--block-radius) !important; - } - - #prompt-form textarea { - border: 1px solid rgb(209, 213, 219); - } - - #prompt-form label > div { - margin-top: 4px; - } - - button.primary:hover { - background-color: var(--primary-600) !important; - transition: .2s; - } - - #prompt-form-area { - margin-bottom: 2.5rem; - } - .chatsmall chatbot {font-size: 10px !important} - - .gradio-container { - max-width: none !important; - } - """ diff --git a/spaces/gotiQspiryo/whisper-ui/examples/IAR Embedded Workbench For AVR V5500 Build 50277rar [CRACKED].md b/spaces/gotiQspiryo/whisper-ui/examples/IAR Embedded Workbench For AVR V5500 Build 50277rar [CRACKED].md deleted file mode 100644 index 11ff6fbc2706a12e4eaab1b355ff539975a4de6a..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/IAR Embedded Workbench For AVR V5500 Build 50277rar [CRACKED].md +++ /dev/null @@ -1,18 +0,0 @@ - -
# IAR Embedded Workbench for AVR: A Powerful Development Toolchain

IAR Embedded Workbench for AVR is a complete development toolchain that provides one toolbox in a single view, giving you one uninterrupted workflow. It includes an integrated development environment (IDE) with project management tools and an editor, a highly optimizing C and C++ compiler for AVR, linker and library tools, a debugger with a simulator and hardware support, and an integrated code analysis tool. It also supports the latest C18 and C++17 language standards, additional GNU C language extensions, and Unicode characters in source code.

With IAR Embedded Workbench for AVR, you can create compact and energy-efficient systems for a wide range of AVR devices. You can also use the IAR Build Tools for AVR to set up modern, scalable workflows with automated application build and test processes in cross-platform environments. In addition, buying a license of IAR Embedded Workbench for AVR or IAR Build Tools for AVR gives you access to free training courses through the IAR Academy on Demand offering.

If you want to learn more about IAR Embedded Workbench for AVR or the IAR Build Tools for AVR, you can visit the official IAR Systems website or contact their sales team. You can also check the list of supported AVR devices and the latest releases of these products.

## Key Features of IAR Embedded Workbench for AVR

IAR Embedded Workbench for AVR offers a number of key features that make it a powerful and reliable development toolchain for AVR microcontrollers:

- User-friendly IDE: The integrated development environment provides project management tools and an editor with linker and library tools, example projects, and code templates for AVR. It also supports editor themes, syntax feedback, improved parameter hints, window color themes, and graying out of inactive code.
- Powerful build tools: The IAR C/C++ Compiler is a highly optimizing compiler for AVR that offers configuration files for all AVR Classic, ATmega and FPSLIC families, including devices with the enhanced cores, a relocating AVR assembler, and run-time libraries. It also supports the C18 and C++17 language standards, additional GNU C language extensions, and Unicode characters in source code.
- Comprehensive debugger: The C-SPY Debugger provides an AVR simulator, RTOS-aware debugging on hardware, and extensive hardware debugger support. It also integrates with Visual Studio Code through the IAR Build and IAR C-SPY Debug extensions available on the Microsoft Marketplace.
- Integrated code analysis: IAR Embedded Workbench for AVR features the add-on product C-STAT, which provides integrated static code analysis to help ensure code quality and alignment with standards such as MISRA C:2012. Additional reporting and configuration options are available through the iarbuild command line build utility. A short illustration of the kind of issue such a checker reports appears at the end of this section.
- On-demand training included: When you buy a license of IAR Embedded Workbench for AVR, you get access to free training courses through the IAR Academy on Demand offering, covering topics such as getting started, debugging, optimization, security, and functional safety.

IAR Embedded Workbench for AVR is a proven development toolchain for building high-quality applications for AVR microcontrollers, whether you are working on automotive, industrial, consumer, or IoT applications.
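
To make the static-analysis feature above more concrete, here is a small, generic C sketch of the kind of construct a MISRA C:2012 checker such as C-STAT typically reports, together with a compliant rewrite. This is only an illustration under stated assumptions: the rule reference comes from the MISRA C:2012 guidelines themselves, the function names are invented for this example, and the exact wording of C-STAT's diagnostics is not reproduced here.

```c
#include <stdint.h>

/* Non-compliant sketch: the division is performed at integer-promoted width
 * and the result is implicitly narrowed when assigned to a uint8_t. A checker
 * enforcing MISRA C:2012 Rule 10.3 (the value of an expression shall not be
 * assigned to an object with a narrower essential type) typically flags this
 * assignment. */
uint8_t scale_sample(uint16_t raw)
{
    uint8_t scaled = raw / 16;
    return scaled;
}

/* Compliant sketch: keep the arithmetic unsigned and make the narrowing
 * conversion explicit, so the intent is visible to reviewers and tools. */
uint8_t scale_sample_checked(uint16_t raw)
{
    return (uint8_t)(raw / 16u);
}
```

In practice, such findings would typically be reviewed in the IDE or exported as a report during the regular build; the exact workflow depends on how the project's coding-standard checks are configured.
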
    \ No newline at end of file diff --git a/spaces/gradio/automatic-speech-recognition/run.py b/spaces/gradio/automatic-speech-recognition/run.py deleted file mode 100644 index 0bb8cf6b8d7e0d2d6d0d247f3a5f0c95b32453b2..0000000000000000000000000000000000000000 --- a/spaces/gradio/automatic-speech-recognition/run.py +++ /dev/null @@ -1,17 +0,0 @@ -import gradio as gr -import os - -# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting -auth_token = os.getenv("auth_token") - -# automatically load the interface from a HF model -# you can remove the api_key parameter if you don't care about rate limiting. -demo = gr.load( - "huggingface/facebook/wav2vec2-base-960h", - title="Speech-to-text", - inputs="mic", - description="Let me try to guess what you're saying!", - hf_token=auth_token -) - -demo.launch() diff --git a/spaces/gradio/longformer/tvm/contrib/dlpack.py b/spaces/gradio/longformer/tvm/contrib/dlpack.py deleted file mode 100644 index a42536745a7d81f488dd11e410746819ac68a42d..0000000000000000000000000000000000000000 --- a/spaces/gradio/longformer/tvm/contrib/dlpack.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -"""Wrapping functions to bridge frameworks with DLPack support to TVM""" -from .. import ndarray - -def convert_func(tvm_func, tensor_type, to_dlpack_func): - """Convert a tvm function into one that accepts a tensor from another - framework, provided the other framework supports DLPACK - - Parameters - ---------- - tvm_func: Function - Built tvm function operating on arrays - - tensor_type: Type - Type of the tensors of the target framework - - to_dlpack_func: Function - Function to convert the source tensors to DLPACK - """ - assert callable(tvm_func) - - def _wrapper(*args): - args = tuple(ndarray.from_dlpack(to_dlpack_func(arg))\ - if isinstance(arg, tensor_type) else arg for arg in args) - return tvm_func(*args) - - return _wrapper - -def to_pytorch_func(tvm_func): - """Convert a tvm function into one that accepts PyTorch tensors - - Parameters - ---------- - tvm_func: Function - Built tvm function operating on arrays - - Returns - ------- - wrapped_func: Function - Wrapped tvm function that operates on PyTorch tensors - """ - import torch - import torch.utils.dlpack - return convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack) diff --git a/spaces/gylleus/icongen/torch_utils/ops/upfirdn2d.h b/spaces/gylleus/icongen/torch_utils/ops/upfirdn2d.h deleted file mode 100644 index c9e2032bcac9d2abde7a75eea4d812da348afadd..0000000000000000000000000000000000000000 --- a/spaces/gylleus/icongen/torch_utils/ops/upfirdn2d.h +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 
-// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include - -//------------------------------------------------------------------------ -// CUDA kernel parameters. - -struct upfirdn2d_kernel_params -{ - const void* x; - const float* f; - void* y; - - int2 up; - int2 down; - int2 pad0; - int flip; - float gain; - - int4 inSize; // [width, height, channel, batch] - int4 inStride; - int2 filterSize; // [width, height] - int2 filterStride; - int4 outSize; // [width, height, channel, batch] - int4 outStride; - int sizeMinor; - int sizeMajor; - - int loopMinor; - int loopMajor; - int loopX; - int launchMinor; - int launchMajor; -}; - -//------------------------------------------------------------------------ -// CUDA kernel specialization. - -struct upfirdn2d_kernel_spec -{ - void* kernel; - int tileOutW; - int tileOutH; - int loopMinor; - int loopX; -}; - -//------------------------------------------------------------------------ -// CUDA kernel selection. - -template upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p); - -//------------------------------------------------------------------------ diff --git a/spaces/h2oai/wave-tour/examples/stat_tall_gauge.py b/spaces/h2oai/wave-tour/examples/stat_tall_gauge.py deleted file mode 100644 index 67260b44435fd0a2472753040fa257298e02ad74..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/stat_tall_gauge.py +++ /dev/null @@ -1,34 +0,0 @@ -# Stat / Gauge / Tall -# Create a tall stat card displaying a primary value, an auxiliary value and a #progress gauge. 
-# #stat_card -# --- -import time - -from faker import Faker - -from synth import FakePercent -from h2o_wave import site, ui - -page = site['/demo'] - -fake = Faker() -f = FakePercent() -val, pc = f.next() -c = page.add('example', ui.tall_gauge_stat_card( - box='1 1 1 2', - title=fake.cryptocurrency_name(), - value='=${{intl foo minimum_fraction_digits=2 maximum_fraction_digits=2}}', - aux_value='={{intl bar style="percent" minimum_fraction_digits=2 maximum_fraction_digits=2}}', - plot_color='$red', - progress=pc, - data=dict(foo=val, bar=pc), -)) -page.save() - -while True: - time.sleep(1) - val, pc = f.next() - c.data.foo = val - c.data.bar = pc - c.progress = pc - page.save() diff --git a/spaces/hahahafofo/ChatPDF/app.py b/spaces/hahahafofo/ChatPDF/app.py deleted file mode 100644 index 9e74c0266a119a21d738a960563628b94cfb9efa..0000000000000000000000000000000000000000 --- a/spaces/hahahafofo/ChatPDF/app.py +++ /dev/null @@ -1,290 +0,0 @@ -# -*- coding: utf-8 -*- -""" -@author:XuMing(xuming624@qq.com) -@description: -modified from https://github.com/imClumsyPanda/langchain-ChatGLM/blob/master/webui.py -""" -import gradio as gr -import os -import shutil -from loguru import logger -from chatpdf import ChatPDF -import hashlib -from typing import List - -pwd_path = os.path.abspath(os.path.dirname(__file__)) - -CONTENT_DIR = os.path.join(pwd_path, "content") -logger.info(f"CONTENT_DIR: {CONTENT_DIR}") -VECTOR_SEARCH_TOP_K = 3 -MAX_INPUT_LEN = 2048 - -embedding_model_dict = { - "text2vec-large": "GanymedeNil/text2vec-large-chinese", - "text2vec-base": "shibing624/text2vec-base-chinese", - "sentence-transformers": "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2", - "ernie-tiny": "nghuyong/ernie-3.0-nano-zh", - "ernie-base": "nghuyong/ernie-3.0-base-zh", - -} - -# supported LLM models -llm_model_dict = { - - # "chatglm-6b": "E:\\sdwebui\\image2text_prompt_generator\\models\\chatglm-6b", - "chatglm-6b-int4": "THUDM/chatglm-6b-int4", - "chatglm-6b": "THUDM/chatglm-6b", - "chatglm-6b-int4-qe": "THUDM/chatglm-6b-int4-qe", - "llama-7b": "decapoda-research/llama-7b-hf", - "llama-13b": "decapoda-research/llama-13b-hf", - "t5-lamini-flan-783M": "MBZUAI/LaMini-Flan-T5-783M", -} - -llm_model_dict_list = list(llm_model_dict.keys()) -embedding_model_dict_list = list(embedding_model_dict.keys()) - -model = None - - -def get_file_list(): - if not os.path.exists("content"): - return [] - return [f for f in os.listdir("content") if - f.endswith(".txt") or f.endswith(".pdf") or f.endswith(".docx") or f.endswith(".md")] - - -def upload_file(file, file_list): - if not os.path.exists(CONTENT_DIR): - os.mkdir(CONTENT_DIR) - filename = os.path.basename(file.name) - shutil.move(file.name, os.path.join(CONTENT_DIR, filename)) - # file_list首位插入新上传的文件 - file_list.insert(0, filename) - return gr.Dropdown.update(choices=file_list, value=filename), file_list - - -def parse_text(text): - """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/""" - lines = text.split("\n") - lines = [line for line in lines if line != ""] - count = 0 - for i, line in enumerate(lines): - if "```" in line: - count += 1 - items = line.split('`') - if count % 2 == 1: - lines[i] = f'
    '
    -            else:
    -                lines[i] = f'
    ' - else: - if i > 0: - if count % 2 == 1: - line = line.replace("`", "\`") - line = line.replace("<", "<") - line = line.replace(">", ">") - line = line.replace(" ", " ") - line = line.replace("*", "*") - line = line.replace("_", "_") - line = line.replace("-", "-") - line = line.replace(".", ".") - line = line.replace("!", "!") - line = line.replace("(", "(") - line = line.replace(")", ")") - line = line.replace("$", "$") - lines[i] = "
    " + line - text = "".join(lines) - return text - - -def get_answer( - query, - index_path, - history, - topn: int = VECTOR_SEARCH_TOP_K, - max_input_size: int = 1024, - chat_mode: str = "pdf" -): - global model - - if model is None: - return [None, "模型还未加载"], query - if index_path and chat_mode == "pdf": - if not model.sim_model.corpus_embeddings: - model.load_index(index_path) - response, empty_history, reference_results = model.query(query=query, topn=topn, max_input_size=max_input_size) - - logger.debug(f"query: {query}, response with content: {response}") - for i in range(len(reference_results)): - r = reference_results[i] - response += f"\n{r.strip()}" - response = parse_text(response) - history = history + [[query, response]] - else: - # 未加载文件,仅返回生成模型结果 - response, empty_history = model.chat(query, history) - response = parse_text(response) - history = history + [[query, response]] - logger.debug(f"query: {query}, response: {response}") - return history, "" - - -def update_status(history, status): - history = history + [[None, status]] - logger.info(status) - return history - - -def reinit_model(llm_model, embedding_model, history): - try: - global model - if model is not None: - del model - model = ChatPDF( - sim_model_name_or_path=embedding_model_dict.get( - embedding_model, - "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2" - ), - gen_model_type=llm_model.split('-')[0], - gen_model_name_or_path=llm_model_dict.get(llm_model, "THUDM/chatglm-6b-int4"), - lora_model_name_or_path=None, - ) - - model_status = """模型已成功重新加载,请选择文件后点击"加载文件"按钮""" - except Exception as e: - model = None - logger.error(e) - model_status = """模型未成功重新加载,请重新选择后点击"加载模型"按钮""" - return history + [[None, model_status]] - - -def get_file_hash(fpath): - return hashlib.md5(open(fpath, 'rb').read()).hexdigest() - - -def get_vector_store(filepath, history, embedding_model): - logger.info(filepath, history) - index_path = None - file_status = '' - if model is not None: - - local_file_path = os.path.join(CONTENT_DIR, filepath) - - local_file_hash = get_file_hash(local_file_path) - index_file_name = f"{filepath}.{embedding_model}.{local_file_hash}.index.json" - - local_index_path = os.path.join(CONTENT_DIR, index_file_name) - - if os.path.exists(local_index_path): - model.load_index(local_index_path) - index_path = local_index_path - file_status = "文件已成功加载,请开始提问" - - elif os.path.exists(local_file_path): - model.load_pdf_file(local_file_path) - model.save_index(local_index_path) - index_path = local_index_path - if index_path: - file_status = "文件索引并成功加载,请开始提问" - else: - file_status = "文件未成功加载,请重新上传文件" - else: - file_status = "模型未完成加载,请先在加载模型后再导入文件" - - return index_path, history + [[None, file_status]] - - -def reset_chat(chatbot, state): - return None, None - - -block_css = """.importantButton { - background: linear-gradient(45deg, #7e0570,#5d1c99, #6e00ff) !important; - border: none !important; -} -.importantButton:hover { - background: linear-gradient(45deg, #ff00e0,#8500ff, #6e00ff) !important; - border: none !important; -}""" - -webui_title = """ -# 🎉ChatPDF WebUI🎉 -Link in: [https://github.com/zhongpei/ChatPDF](https://github.com/zhongpei/ChatPDF) Test for MBZUAI/LaMini-Flan-T5-783M -""" - -init_message = """欢迎使用 ChatPDF Web UI,可以直接提问或上传文件后提问 """ - -with gr.Blocks(css=block_css) as demo: - index_path, file_status, model_status = gr.State(""), gr.State(""), gr.State("") - file_list = gr.State(get_file_list()) - gr.Markdown(webui_title) - with gr.Row(): - with gr.Column(scale=2): - chatbot = 
gr.Chatbot([[None, init_message], [None, None]], - elem_id="chat-box", - show_label=False).style(height=700) - query = gr.Textbox(show_label=False, - placeholder="请输入提问内容,按回车进行提交", - ).style(container=False) - clear_btn = gr.Button('🔄Clear!', elem_id='clear').style(full_width=True) - with gr.Column(scale=1): - llm_model = gr.Radio(llm_model_dict_list, - label="LLM 模型", - value=list(llm_model_dict.keys())[0], - interactive=True) - embedding_model = gr.Radio(embedding_model_dict_list, - label="Embedding 模型", - value=embedding_model_dict_list[0], - interactive=True) - - load_model_button = gr.Button("重新加载模型" if model is not None else "加载模型") - - with gr.Row(): - chat_mode = gr.Radio(choices=["chat", "pdf"], value="pdf", label="聊天模式") - - with gr.Row(): - topn = gr.Slider(1, 100, 20, step=1, label="最大搜索数量") - max_input_size = gr.Slider(512, 4096, MAX_INPUT_LEN, step=10, label="摘要最大长度") - with gr.Tab("select"): - with gr.Row(): - selectFile = gr.Dropdown( - file_list.value, - label="content file", - interactive=True, - value=file_list.value[0] if len(file_list.value) > 0 else None - ) - # get_file_list_btn = gr.Button('🔄').style(width=10) - with gr.Tab("upload"): - file = gr.File( - label="content file", - file_types=['.txt', '.md', '.docx', '.pdf'] - ) - load_file_button = gr.Button("加载文件") - - load_model_button.click( - reinit_model, - show_progress=True, - inputs=[llm_model, embedding_model, chatbot], - outputs=chatbot - ) - # 将上传的文件保存到content文件夹下,并更新下拉框 - file.upload( - upload_file, - inputs=[file, file_list], - outputs=[selectFile, file_list] - ) - load_file_button.click( - get_vector_store, - show_progress=True, - inputs=[selectFile, chatbot, embedding_model], - outputs=[index_path, chatbot], - ) - query.submit( - get_answer, - [query, index_path, chatbot, topn, max_input_size, chat_mode], - [chatbot, query], - ) - clear_btn.click(reset_chat, [chatbot, query], [chatbot, query]) - -demo.queue(concurrency_count=3).launch( - server_name='0.0.0.0', share=False, inbrowser=False -) diff --git a/spaces/hamacojr/CAT-Seg/open_clip/src/training/file_utils.py b/spaces/hamacojr/CAT-Seg/open_clip/src/training/file_utils.py deleted file mode 100644 index 8ec933b7d718bf4efb59962fdd97e18357303865..0000000000000000000000000000000000000000 --- a/spaces/hamacojr/CAT-Seg/open_clip/src/training/file_utils.py +++ /dev/null @@ -1,83 +0,0 @@ -import logging -import os -import multiprocessing -import subprocess -import time -import fsspec -import torch -from tqdm import tqdm - -def remote_sync_s3(local_dir, remote_dir): - # skip epoch_latest which can change during sync. - result = subprocess.run(["aws", "s3", "sync", local_dir, remote_dir, '--exclude', '*epoch_latest.pt'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if result.returncode != 0: - logging.error(f"Error: Failed to sync with S3 bucket {result.stderr.decode('utf-8')}") - return False - - logging.info(f"Successfully synced with S3 bucket") - return True - -def remote_sync_fsspec(local_dir, remote_dir): - # FIXME currently this is slow and not recommended. Look into speeding up. - a = fsspec.get_mapper(local_dir) - b = fsspec.get_mapper(remote_dir) - - for k in a: - # skip epoch_latest which can change during sync. 
- if 'epoch_latest.pt' in k: - continue - - logging.info(f'Attempting to sync {k}') - if k in b and len(a[k]) == len(b[k]): - logging.debug(f'Skipping remote sync for {k}.') - continue - - try: - logging.info(f'Successful sync for {k}.') - b[k] = a[k] - except Exception as e: - logging.info(f'Error during remote sync for {k}: {e}') - return False - - return True - -def remote_sync(local_dir, remote_dir, protocol): - logging.info('Starting remote sync.') - if protocol == 's3': - return remote_sync_s3(local_dir, remote_dir) - elif protocol == 'fsspec': - return remote_sync_fsspec(local_dir, remote_dir) - else: - logging.error('Remote protocol not known') - return False - -def keep_running_remote_sync(sync_every, local_dir, remote_dir, protocol): - while True: - time.sleep(sync_every) - remote_sync(local_dir, remote_dir, protocol) - -def start_sync_process(sync_every, local_dir, remote_dir, protocol): - p = multiprocessing.Process(target=keep_running_remote_sync, args=(sync_every, local_dir, remote_dir, protocol)) - return p - -# Note: we are not currently using this save function. -def pt_save(pt_obj, file_path): - of = fsspec.open(file_path, "wb") - with of as f: - torch.save(pt_obj, file_path) - -def pt_load(file_path, map_location=None): - if not file_path.startswith('/'): - logging.info('Loading remote checkpoint, which may take a bit.') - of = fsspec.open(file_path, "rb") - with of as f: - out = torch.load(f, map_location=map_location) - return out - -def check_exists(file_path): - try: - with fsspec.open(file_path): - pass - except FileNotFoundError: - return False - return True diff --git a/spaces/hamelcubsfan/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md b/spaces/hamelcubsfan/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index a4f28a3d27d66d79cb95f2b8b847832172bb5f11..0000000000000000000000000000000000000000 --- a/spaces/hamelcubsfan/AutoGPT/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,40 +0,0 @@ - - - - -### Background - - -### Changes - - -### Documentation - - -### Test Plan - - -### PR Quality Checklist -- [ ] My pull request is atomic and focuses on a single change. -- [ ] I have thoroughly tested my changes with multiple different prompts. -- [ ] I have considered potential risks and mitigations for my changes. -- [ ] I have documented my changes clearly and comprehensively. -- [ ] I have not snuck in any "extra" small tweaks changes - - - - diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/csrc/ml_nms.h b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/csrc/ml_nms.h deleted file mode 100644 index e30cd6db9109a0a94e98bba5b8669d90853e0922..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/csrc/ml_nms.h +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-#pragma once -#include "cpu/vision.h" - -#ifdef WITH_CUDA -#include "cuda/vision.h" -#endif - - -at::Tensor ml_nms(const at::Tensor& dets, - const at::Tensor& scores, - const at::Tensor& labels, - const float threshold) { - - if (dets.device().is_cuda()) { -#ifdef WITH_CUDA - // TODO raise error if not compiled with CUDA - if (dets.numel() == 0) - return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); - auto b = at::cat({dets, scores.unsqueeze(1), labels.unsqueeze(1)}, 1); - return ml_nms_cuda(b, threshold); -#else - AT_ERROR("Not compiled with GPU support"); -#endif - } - AT_ERROR("CPU version not implemented"); -} diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/language_backbone/word_utils.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/language_backbone/word_utils.py deleted file mode 100644 index c14148413997d09479c9258cdbc404bdf07287a4..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/language_backbone/word_utils.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -Language-related data loading helper functions and class wrappers. -""" - -import re -import torch -import codecs - -UNK_TOKEN = '' -PAD_TOKEN = '' -END_TOKEN = '' -SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)') - - -class Dictionary(object): - def __init__(self): - self.word2idx = {} - self.idx2word = [] - - def add_word(self, word): - if word not in self.word2idx: - self.idx2word.append(word) - self.word2idx[word] = len(self.idx2word) - 1 - return self.word2idx[word] - - def __len__(self): - return len(self.idx2word) - - def __getitem__(self, a): - if isinstance(a, int): - return self.idx2word[a] - elif isinstance(a, list): - return [self.idx2word[x] for x in a] - elif isinstance(a, str): - return self.word2idx[a] - else: - raise TypeError("Query word/index argument must be int or str") - - def __contains__(self, word): - return word in self.word2idx - - -class Corpus(object): - def __init__(self): - self.dictionary = Dictionary() - - def set_max_len(self, value): - self.max_len = value - - def load_file(self, filename): - with codecs.open(filename, 'r', 'utf-8') as f: - for line in f: - line = line.strip() - self.add_to_corpus(line) - self.dictionary.add_word(UNK_TOKEN) - self.dictionary.add_word(PAD_TOKEN) - - def add_to_corpus(self, line): - """Tokenizes a text line.""" - # Add words to the dictionary - words = line.split() - # tokens = len(words) - for word in words: - word = word.lower() - self.dictionary.add_word(word) - - def tokenize(self, line, max_len=20): - # Tokenize line contents - words = SENTENCE_SPLIT_REGEX.split(line.strip()) - # words = [w.lower() for w in words if len(w) > 0] - words = [w.lower() for w in words if (len(w) > 0 and w != ' ')] ## do not include space as a token - - if words[-1] == '.': - words = words[:-1] - - if max_len > 0: - if len(words) > max_len: - words = words[:max_len] - elif len(words) < max_len: - # words = [PAD_TOKEN] * (max_len - len(words)) + words - words = words + [END_TOKEN] + [PAD_TOKEN] * (max_len - len(words) - 1) - - tokens = len(words) ## for end token - ids = torch.LongTensor(tokens) - token = 0 - for word in words: - if word not in self.dictionary: - word = UNK_TOKEN - # print(word, type(word), word.encode('ascii','ignore').decode('ascii'), type(word.encode('ascii','ignore').decode('ascii'))) - if type(word) != type('a'): - print(word, type(word), word.encode('ascii', 'ignore').decode('ascii'), - type(word.encode('ascii', 'ignore').decode('ascii'))) - word = 
word.encode('ascii', 'ignore').decode('ascii') - ids[token] = self.dictionary[word] - token += 1 - # ids[token] = self.dictionary[END_TOKEN] - return ids - - def __len__(self): - return len(self.dictionary) diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/structures/boxlist_ops.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/structures/boxlist_ops.py deleted file mode 100644 index c16ff0fd402f948336e2adfd8138677c7f43e4c8..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/structures/boxlist_ops.py +++ /dev/null @@ -1,184 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import torch - -from .bounding_box import BoxList - -from maskrcnn_benchmark.layers import nms as _box_nms -from maskrcnn_benchmark.layers import ml_nms as _box_ml_nms - - -def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="score"): - """ - Performs non-maximum suppression on a boxlist, with scores specified - in a boxlist field via score_field. - - Arguments: - boxlist(BoxList) - nms_thresh (float) - max_proposals (int): if > 0, then only the top max_proposals are kept - after non-maxium suppression - score_field (str) - """ - if nms_thresh <= 0: - return boxlist - mode = boxlist.mode - boxlist = boxlist.convert("xyxy") - boxes = boxlist.bbox - score = boxlist.get_field(score_field) - keep = _box_nms(boxes, score, nms_thresh) - if max_proposals > 0: - keep = keep[: max_proposals] - boxlist = boxlist[keep] - return boxlist.convert(mode) - - -def boxlist_ml_nms(boxlist, nms_thresh, max_proposals=-1, - score_field="scores", label_field="labels"): - """ - Performs non-maximum suppression on a boxlist, with scores specified - in a boxlist field via score_field. - - Arguments: - boxlist(BoxList) - nms_thresh (float) - max_proposals (int): if > 0, then only the top max_proposals are kept - after non-maximum suppression - score_field (str) - """ - if nms_thresh <= 0: - return boxlist - mode = boxlist.mode - boxlist = boxlist.convert("xyxy") - boxes = boxlist.bbox - scores = boxlist.get_field(score_field) - labels = boxlist.get_field(label_field) - - if boxes.device==torch.device("cpu"): - keep = [] - unique_labels = torch.unique(labels) - print(unique_labels) - for j in unique_labels: - inds = (labels == j).nonzero().view(-1) - - scores_j = scores[inds] - boxes_j = boxes[inds, :].view(-1, 4) - keep_j = _box_nms(boxes_j, scores_j, nms_thresh) - - keep += keep_j - else: - keep = _box_ml_nms(boxes, scores, labels.float(), nms_thresh) - - if max_proposals > 0: - keep = keep[: max_proposals] - boxlist = boxlist[keep] - - return boxlist.convert(mode) - - -def remove_small_boxes(boxlist, min_size): - """ - Only keep boxes with both sides >= min_size - - Arguments: - boxlist (Boxlist) - min_size (int) - """ - # WORK AROUND: work around unbind using split + squeeze. - xywh_boxes = boxlist.convert("xywh").bbox - _, _, ws, hs = xywh_boxes.split(1, dim=1) - ws = ws.squeeze(1) - hs = hs.squeeze(1) - keep = ((ws >= min_size) & (hs >= min_size)).nonzero().squeeze(1) - return boxlist[keep] - - -# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py -# with slight modifications -def boxlist_iou(boxlist1, boxlist2): - """Compute the intersection over union of two set of boxes. - The box order must be (xmin, ymin, xmax, ymax). - - Arguments: - box1: (BoxList) bounding boxes, sized [N,4]. - box2: (BoxList) bounding boxes, sized [M,4]. - - Returns: - (tensor) iou, sized [N,M]. 
- - Reference: - https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py - """ - if boxlist1.size != boxlist2.size: - raise RuntimeError( - "boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2)) - - N = len(boxlist1) - M = len(boxlist2) - - area1 = boxlist1.area() - area2 = boxlist2.area() - - box1, box2 = boxlist1.bbox, boxlist2.bbox - - lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2] - rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2] - - TO_REMOVE = 1 - - wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2] - inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] - - iou = inter / (area1[:, None] + area2 - inter) - return iou - - -# TODO redundant, remove -def _cat(tensors, dim=0): - """ - Efficient version of torch.cat that avoids a copy if there is only a single element in a list - """ - assert isinstance(tensors, (list, tuple)) - if len(tensors) == 1: - return tensors[0] - if isinstance(tensors[0], torch.Tensor): - return torch.cat(tensors, dim) - else: - return cat_boxlist(tensors) - -def cat_boxlist(bboxes): - """ - Concatenates a list of BoxList (having the same image size) into a - single BoxList - - Arguments: - bboxes (list[BoxList]) - """ - assert isinstance(bboxes, (list, tuple)) - assert all(isinstance(bbox, BoxList) for bbox in bboxes) - - size = bboxes[0].size - assert all(bbox.size == size for bbox in bboxes) - - mode = bboxes[0].mode - assert all(bbox.mode == mode for bbox in bboxes) - - fields = set(bboxes[0].fields()) - assert all(set(bbox.fields()) == fields for bbox in bboxes) - - cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode) - - for field in fields: - data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0) - cat_boxes.add_field(field, data) - - return cat_boxes - - -def getUnionBBox(aBB, bBB, margin = 10): - assert aBB.size==bBB.size - assert aBB.mode==bBB.mode - ih, iw = aBB.size - union_boxes = torch.cat([(torch.min(aBB.bbox[:,[0,1]], bBB.bbox[:,[0,1]]) - margin).clamp(min=0), \ - (torch.max(aBB.bbox[:,[2]], bBB.bbox[:,[2]]) + margin).clamp(max=iw), \ - (torch.max(aBB.bbox[:,[3]], bBB.bbox[:,[3]]) + margin).clamp(max=ih)], dim=1) - return BoxList(union_boxes, aBB.size, mode=aBB.mode) diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/modules/deeplab.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/modules/deeplab.py deleted file mode 100644 index fd25b78369b27ef02c183a0b17b9bf8354c5f7c3..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/modules/deeplab.py +++ /dev/null @@ -1,84 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as functional - -from models._util import try_index -from .bn import ABN - - -class DeeplabV3(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels=256, - dilations=(12, 24, 36), - norm_act=ABN, - pooling_size=None): - super(DeeplabV3, self).__init__() - self.pooling_size = pooling_size - - self.map_convs = nn.ModuleList([ - nn.Conv2d(in_channels, hidden_channels, 1, bias=False), - nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[0], padding=dilations[0]), - nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[1], padding=dilations[1]), - nn.Conv2d(in_channels, hidden_channels, 3, bias=False, dilation=dilations[2], padding=dilations[2]) - ]) - self.map_bn = norm_act(hidden_channels * 4) - - self.global_pooling_conv = 
nn.Conv2d(in_channels, hidden_channels, 1, bias=False) - self.global_pooling_bn = norm_act(hidden_channels) - - self.red_conv = nn.Conv2d(hidden_channels * 4, out_channels, 1, bias=False) - self.pool_red_conv = nn.Conv2d(hidden_channels, out_channels, 1, bias=False) - self.red_bn = norm_act(out_channels) - - self.reset_parameters(self.map_bn.activation, self.map_bn.slope) - - def reset_parameters(self, activation, slope): - gain = nn.init.calculate_gain(activation, slope) - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.xavier_normal_(m.weight.data, gain) - if hasattr(m, "bias") and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, ABN): - if hasattr(m, "weight") and m.weight is not None: - nn.init.constant_(m.weight, 1) - if hasattr(m, "bias") and m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - # Map convolutions - out = torch.cat([m(x) for m in self.map_convs], dim=1) - out = self.map_bn(out) - out = self.red_conv(out) - - # Global pooling - pool = self._global_pooling(x) - pool = self.global_pooling_conv(pool) - pool = self.global_pooling_bn(pool) - pool = self.pool_red_conv(pool) - if self.training or self.pooling_size is None: - pool = pool.repeat(1, 1, x.size(2), x.size(3)) - - out += pool - out = self.red_bn(out) - return out - - def _global_pooling(self, x): - if self.training or self.pooling_size is None: - pool = x.view(x.size(0), x.size(1), -1).mean(dim=-1) - pool = pool.view(x.size(0), x.size(1), 1, 1) - else: - pooling_size = (min(try_index(self.pooling_size, 0), x.shape[2]), - min(try_index(self.pooling_size, 1), x.shape[3])) - padding = ( - (pooling_size[1] - 1) // 2, - (pooling_size[1] - 1) // 2 if pooling_size[1] % 2 == 1 else (pooling_size[1] - 1) // 2 + 1, - (pooling_size[0] - 1) // 2, - (pooling_size[0] - 1) // 2 if pooling_size[0] % 2 == 1 else (pooling_size[0] - 1) // 2 + 1 - ) - - pool = functional.avg_pool2d(x, pooling_size, stride=1) - pool = functional.pad(pool, pad=padding, mode="replicate") - return pool diff --git a/spaces/hasibzunair/fifa-tryon-demo/models/networks_backup.py b/spaces/hasibzunair/fifa-tryon-demo/models/networks_backup.py deleted file mode 100644 index ad5182af00888273f06924229321d0a9c9ea87f8..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/models/networks_backup.py +++ /dev/null @@ -1,1730 +0,0 @@ -# Copyright (C) 2017 NVIDIA Corporation. All rights reserved. -# Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
-from torchvision import models -import torch -import os -import torch.nn as nn -import functools -from torch.autograd import Variable -import numpy as np -import torch.nn.functional as F -import math -import torch -import itertools -import numpy as np -import torch.nn as nn -import torch.nn.functional as F -from grid_sample import grid_sample -from torch.autograd import Variable -from tps_grid_gen import TPSGridGen -import ipdb - -############################################################################### -# Functions -############################################################################### - - -def weights_init(m): - classname = m.__class__.__name__ - if classname.find('Conv2d') != -1: - m.weight.data.normal_(0.0, 0.02) - elif classname.find('BatchNorm2d') != -1: - m.weight.data.normal_(1.0, 0.02) - m.bias.data.fill_(0) - - -def get_norm_layer(norm_type='instance'): - if norm_type == 'batch': - norm_layer = functools.partial(nn.BatchNorm2d, affine=True) - elif norm_type == 'instance': - norm_layer = functools.partial(nn.InstanceNorm2d, affine=False) - else: - raise NotImplementedError( - 'normalization layer [%s] is not found' % norm_type) - return norm_layer - - -def define_G(input_nc, output_nc, ngf, netG, L=1, S=1, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1, - n_blocks_local=3, norm='instance', gpu_ids=[]): - norm_layer = get_norm_layer(norm_type=norm) - if netG == 'global': - netG = GlobalGenerator(input_nc, output_nc, L, S, ngf, - n_downsample_global, n_blocks_global, norm_layer) - elif netG == 'local': - netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, - n_local_enhancers, n_blocks_local, norm_layer) - else: - raise ('generator not implemented!') - print(netG) - if len(gpu_ids) > 0: - assert (torch.cuda.is_available()) - netG.cuda(gpu_ids[0]) - netG.apply(weights_init) - return netG - - -def define_Unet(input_nc, gpu_ids=[]): - netG = Unet(input_nc) - netG.cuda(gpu_ids[0]) - netG.apply(weights_init) - return netG - - -def define_UnetMask(input_nc, gpu_ids=[]): - netG = UnetMask(input_nc, output_nc=4) - netG.cuda(gpu_ids[0]) - netG.apply(weights_init) - return netG - - -def define_Refine(input_nc, output_nc, gpu_ids=[]): - netG = Refine(input_nc, output_nc) - netG.cuda(gpu_ids[0]) - netG.apply(weights_init) - return netG - - -def define_D(input_nc, ndf, n_layers_D, norm='instance', use_sigmoid=False, num_D=1, getIntermFeat=False, gpu_ids=[]): - norm_layer = get_norm_layer(norm_type=norm) - netD = MultiscaleDiscriminator( - input_nc, ndf, n_layers_D, norm_layer, use_sigmoid, num_D, getIntermFeat) - print(netD) - if len(gpu_ids) > 0: - assert (torch.cuda.is_available()) - netD.cuda(gpu_ids[0]) - netD.apply(weights_init) - return netD - - -def define_VAE(input_nc, gpu_ids=[]): - netVAE = VAE(19, 32, 32, 1024) - print(netVAE) - if len(gpu_ids) > 0: - assert (torch.cuda.is_available()) - netVAE.cuda(gpu_ids[0]) - return netVAE - - -def define_B(input_nc, output_nc, ngf, n_downsample_global=3, n_blocks_global=3, norm='instance', gpu_ids=[]): - norm_layer = get_norm_layer(norm_type=norm) - netB = BlendGenerator(input_nc, output_nc, ngf, - n_downsample_global, n_blocks_global, norm_layer) - print(netB) - if len(gpu_ids) > 0: - assert (torch.cuda.is_available()) - netB.cuda(gpu_ids[0]) - netB.apply(weights_init) - return netB - - -def define_partial_enc(input_nc, gpu_ids=[]): - net = PartialConvEncoder(input_nc) - print(net) - if len(gpu_ids) > 0: - assert (torch.cuda.is_available()) - net.cuda(gpu_ids[0]) - 
net.apply(weights_init) - return net - - -def define_conv_enc(input_nc, gpu_ids=[]): - net = ConvEncoder(input_nc) - print(net) - if len(gpu_ids) > 0: - assert (torch.cuda.is_available()) - net.cuda(gpu_ids[0]) - net.apply(weights_init) - return net - - -def define_AttG(output_nc, gpu_ids=[]): - net = AttGenerator(output_nc) - print(net) - if len(gpu_ids) > 0: - assert (torch.cuda.is_available()) - net.cuda(gpu_ids[0]) - net.apply(weights_init) - return net - - -def print_network(net): - if isinstance(net, list): - net = net[0] - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - print(net) - print('Total number of parameters: %d' % num_params) - - -############################################################################## -# Losses -############################################################################## -class GANLoss(nn.Module): - def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, - tensor=torch.FloatTensor): - super(GANLoss, self).__init__() - self.real_label = target_real_label - self.fake_label = target_fake_label - self.real_label_var = None - self.fake_label_var = None - self.Tensor = tensor - if use_lsgan: - self.loss = nn.MSELoss() - else: - self.loss = nn.BCELoss() - - def get_target_tensor(self, input, target_is_real): - target_tensor = None - if target_is_real: - create_label = ((self.real_label_var is None) or - (self.real_label_var.numel() != input.numel())) - if create_label: - real_tensor = self.Tensor(input.size()).fill_(self.real_label) - self.real_label_var = Variable( - real_tensor, requires_grad=False) - target_tensor = self.real_label_var - else: - create_label = ((self.fake_label_var is None) or - (self.fake_label_var.numel() != input.numel())) - if create_label: - fake_tensor = self.Tensor(input.size()).fill_(self.fake_label) - self.fake_label_var = Variable( - fake_tensor, requires_grad=False) - target_tensor = self.fake_label_var - return target_tensor - - def __call__(self, input, target_is_real): - if isinstance(input[0], list): - loss = 0 - for input_i in input: - pred = input_i[-1] - target_tensor = self.get_target_tensor(pred, target_is_real) - loss += self.loss(pred, target_tensor) - return loss - else: - target_tensor = self.get_target_tensor(input[-1], target_is_real) - return self.loss(input[-1], target_tensor) - - -class VGGLossWarp(nn.Module): - def __init__(self, gpu_ids): - super(VGGLossWarp, self).__init__() - self.vgg = Vgg19().cuda() - self.criterion = nn.L1Loss() - self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0] - - def forward(self, x, y): - x_vgg, y_vgg = self.vgg(x), self.vgg(y) - loss = 0 - loss += self.weights[4] * self.criterion(x_vgg[4], y_vgg[4].detach()) - return loss - - -class VGGLoss(nn.Module): - def __init__(self, gpu_ids): - super(VGGLoss, self).__init__() - self.vgg = Vgg19().cuda() - self.criterion = nn.L1Loss() - self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0] - - def forward(self, x, y): - x_vgg, y_vgg = self.vgg(x), self.vgg(y) - loss = 0 - for i in range(len(x_vgg)): - loss += self.weights[i] * \ - self.criterion(x_vgg[i], y_vgg[i].detach()) - return loss - - def warp(self, x, y): - x_vgg, y_vgg = self.vgg(x), self.vgg(y) - loss = 0 - loss += self.weights[4] * self.criterion(x_vgg[4], y_vgg[4].detach()) - return loss - - -class StyleLoss(nn.Module): - def __init__(self, gpu_ids): - super(StyleLoss, self).__init__() - self.vgg = Vgg19().cuda() - self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0] - - def forward(self, x, y): - 
x_vgg, y_vgg = self.vgg(x), self.vgg(y) - loss = 0 - for i in range(len(x_vgg)): - N, C, H, W = x_vgg[i].shape - for n in range(N): - phi_x = x_vgg[i][n] - phi_y = y_vgg[i][n] - phi_x = phi_x.reshape(C, H * W) - phi_y = phi_y.reshape(C, H * W) - G_x = torch.matmul(phi_x, phi_x.t()) / (C * H * W) - G_y = torch.matmul(phi_y, phi_y.t()) / (C * H * W) - loss += torch.sqrt(torch.mean((G_x - G_y) ** 2) - ) * self.weights[i] - return loss - - -############################################################################## -# Generator -############################################################################## - -class PartialConvEncoder(nn.Module): - def __init__(self, input_nc, ngf=32, norm_layer=nn.BatchNorm2d): - super(PartialConvEncoder, self).__init__() - activation = nn.ReLU(True) - self.pad1 = nn.ReflectionPad2d(3) - self.partial_conv1 = PartialConv(input_nc, ngf, kernel_size=7) - self.norm_layer1 = norm_layer(ngf) - self.activation = activation - # down sample - mult = 2 ** 0 - self.down1 = PartialConv( - ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1) - self.norm_layer2 = norm_layer(ngf * mult * 2) - mult = 2 ** 1 - self.down2 = PartialConv( - ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1) - self.norm_layer3 = norm_layer(ngf * mult * 2) - - mult = 2 ** 2 - self.down3 = PartialConv( - ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1) - self.norm_layer4 = norm_layer(ngf * mult * 2) - - mult = 2 ** 3 - self.down4 = PartialConv( - ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1) - self.norm_layer5 = norm_layer(ngf * mult * 2) - - def forward(self, input, mask): - input = self.pad1(input) - mask = self.pad1(mask) - input, mask = self.partial_conv1(input, mask) - input = self.norm_layer1(input) - input = self.activation(input) - - input, mask = self.down1(input, mask) - input = self.norm_layer2(input) - input = self.activation(input) - input, mask = self.down2(input, mask) - input = self.norm_layer3(input) - input = self.activation(input) - input, mask = self.down3(input, mask) - input = self.norm_layer4(input) - input = self.activation(input) - input, mask = self.down4(input, mask) - input = self.norm_layer5(input) - input = self.activation(input) - return input - - -class ConvEncoder(nn.Module): - def __init__(self, input_nc, ngf=32, n_downsampling=4, n_blocks=4, norm_layer=nn.BatchNorm2d, - padding_type='reflect'): - super(ConvEncoder, self).__init__() - activation = nn.ReLU(True) - # print("input_nc",input_nc) - model = [nn.ReflectionPad2d(3), nn.Conv2d( - input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] - # downsample - for i in range(n_downsampling): - stride = 2 - - mult = 2 ** i - model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=stride, padding=1), - norm_layer(ngf * mult * 2), activation] - self.model = nn.Sequential(*model) - - def forward(self, input): - return self.model(input) - - -class AttGenerator(nn.Module): - def __init__(self, output_nc, ngf=32, n_blocks=4, n_downsampling=4, padding_type='reflect'): - super(AttGenerator, self).__init__() - mult = 2 ** n_downsampling - model = [] - for i in range(n_blocks): - model += [ResnetBlock(ngf * mult * 2, - norm_type='in', padding_type=padding_type)] - - self.model = nn.Sequential(*model) - self.upsampling = [] - self.out_channels = [] - self.AttNorm = [] - # upsampling - norm_layer = nn.BatchNorm2d - activation = nn.ReLU(True) - - for i in range(n_downsampling): - mult = 2 ** (n_downsampling - i) - up_module = 
[nn.ConvTranspose2d(ngf * mult * 2, int(ngf * mult / 2) * 2, kernel_size=3, stride=2, padding=1, - output_padding=1), - norm_layer(int(ngf * mult / 2) * 2), activation - ] - up_module = nn.Sequential(*up_module) - self.upsampling += [up_module] - self.out_channels += [int(ngf * mult / 2) * 2] - self.upsampling = nn.Sequential(*self.upsampling) - - # - self.AttNorm += [AttentionNorm(5, self.out_channels[0], 2, 4)] - self.AttNorm += [AttentionNorm(5, self.out_channels[1], 2, 2)] - self.AttNorm += [AttentionNorm(5, self.out_channels[2], 1, 2)] - self.AttNorm += [AttentionNorm(5, self.out_channels[3], 1, 1)] - self.AttNorm = nn.Sequential(*self.AttNorm) - self.last_conv = [nn.ReflectionPad2d(3), nn.Conv2d( - ngf * 2, output_nc, kernel_size=7, padding=0), nn.Tanh()] - self.last_conv = nn.Sequential(*self.last_conv) - - def forward(self, input, unattended): - up = self.model(unattended) - for i in range(4): - # print(i) - up = self.upsampling[i](up) - if i == 3: - break - up = self.AttNorm[i](input, up) - return self.last_conv(up) - - -class PartialConv(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, stride=1, - padding=0, dilation=1, groups=1, bias=True): - super(PartialConv, self).__init__() - self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size, - stride, padding, dilation, groups, bias) - self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size, - stride, padding, dilation, groups, False) - self.input_conv.apply(weights_init) - - torch.nn.init.constant_(self.mask_conv.weight, 1.0) - - # mask is not updated - for param in self.mask_conv.parameters(): - param.requires_grad = False - - def forward(self, input, mask): - # http://masc.cs.gmu.edu/wiki/partialconv - # C(X) = W^T * X + b, C(0) = b, D(M) = 1 * M + 0 = sum(M) - # W^T* (M .* X) / sum(M) + b = [C(M .* X) – C(0)] / D(M) + C(0) - - output = self.input_conv(input * mask) - if self.input_conv.bias is not None: - output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as( - output) - else: - output_bias = torch.zeros_like(output) - - with torch.no_grad(): - output_mask = self.mask_conv(mask) - - no_update_holes = output_mask == 0 - mask_sum = output_mask.masked_fill_(no_update_holes, 1.0) - - output_pre = (output - output_bias) / mask_sum + output_bias - output = output_pre.masked_fill_(no_update_holes, 0.0) - - new_mask = torch.ones_like(output) - new_mask = new_mask.masked_fill_(no_update_holes, 0.0) - - return output, new_mask - - -class AttentionNorm(nn.Module): - def __init__(self, ref_channels, out_channels, first_rate, second_rate): - super(AttentionNorm, self).__init__() - self.first = first_rate - self.second = second_rate - mid_channels = int(out_channels / 2) - self.conv_1time_f = nn.Conv2d( - ref_channels, mid_channels, kernel_size=3, stride=1, padding=1) - self.conv_2times_f = nn.Conv2d( - ref_channels, mid_channels, kernel_size=3, stride=2, padding=1) - self.conv_4times_f = nn.Conv2d( - ref_channels, mid_channels, kernel_size=3, stride=4, padding=1) - - self.conv_1time_s = nn.Conv2d( - mid_channels, out_channels, kernel_size=3, stride=1, padding=1) - self.conv_2times_s = nn.Conv2d( - mid_channels, out_channels, kernel_size=3, stride=2, padding=1) - self.conv_4times_s = nn.Conv2d( - mid_channels, out_channels, kernel_size=3, stride=4, padding=1) - - self.conv_1time_m = nn.Conv2d( - mid_channels, out_channels, kernel_size=3, stride=1, padding=1) - self.conv_2times_m = nn.Conv2d( - mid_channels, out_channels, kernel_size=3, stride=2, padding=1) - self.conv_4times_m = nn.Conv2d( - 
mid_channels, out_channels, kernel_size=3, stride=4, padding=1) - self.norm = nn.BatchNorm2d(out_channels) - self.conv = nn.Conv2d(out_channels, out_channels, - kernel_size=3, stride=1, padding=1) - - def forward(self, input, unattended): - # attention weights - # print(input.shape,unattended.shape) - if self.first == 1: - input = self.conv_1time_f(input) - elif self.first == 2: - input = self.conv_2times_f(input) - elif self.first == 4: - input = self.conv_4times_f(input) - mask = None - if self.second == 1: - bias = self.conv_1time_s(input) - mask = self.conv_1time_m(input) - elif self.second == 2: - bias = self.conv_2times_s(input) - mask = self.conv_2times_m(input) - elif self.second == 4: - bias = self.conv_4times_s(input) - mask = self.conv_4times_m(input) - mask = torch.sigmoid(mask) - attended = self.norm(unattended) - # print(attended.shape,mask.shape,bias.shape) - attended = attended * mask + bias - attended = torch.relu(attended) - attended = self.conv(attended) - output = attended + unattended - return output - - -class UnetMask(nn.Module): - def __init__(self, input_nc, output_nc=3): - super(UnetMask, self).__init__() - self.stn = STNNet() - nl = nn.InstanceNorm2d - self.conv1 = nn.Sequential(*[nn.Conv2d(input_nc, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU(), - nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU()]) - self.pool1 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv2 = nn.Sequential(*[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU(), - nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU()]) - self.pool2 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv3 = nn.Sequential(*[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU()]) - self.pool3 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv4 = nn.Sequential(*[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU(), - nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU()]) - self.drop4 = nn.Dropout(0.5) - self.pool4 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv5 = nn.Sequential(*[nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1), nl(1024), nn.ReLU(), - nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1), nl(1024), nn.ReLU()]) - self.drop5 = nn.Dropout(0.5) - - self.up6 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), nl(512), - nn.ReLU()]) - - self.conv6 = nn.Sequential(*[nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU(), - nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU()]) - self.up7 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1), nl(256), - nn.ReLU()]) - self.conv7 = nn.Sequential(*[nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU()]) - - self.up8 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1), nl(128), - nn.ReLU()]) - - self.conv8 = nn.Sequential(*[nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU(), - nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU()]) - - self.up9 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(128, 64, kernel_size=3, stride=1, 
padding=1), nl(64), - nn.ReLU()]) - - self.conv9 = nn.Sequential(*[nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU(), - nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nl( - 64), nn.ReLU(), - nn.Conv2d( - 64, output_nc, kernel_size=3, stride=1, padding=1) - ]) - - def forward(self, input, refer, mask): - input, warped_mask, rx, ry, cx, cy = self.stn( - input, torch.cat([mask, refer, input], 1), mask) - # ipdb.set_trace()# print(input.shape) - - conv1 = self.conv1(torch.cat([refer.detach(), input.detach()], 1)) - pool1 = self.pool1(conv1) - - conv2 = self.conv2(pool1) - pool2 = self.pool2(conv2) - - conv3 = self.conv3(pool2) - pool3 = self.pool3(conv3) - - conv4 = self.conv4(pool3) - drop4 = self.drop4(conv4) - pool4 = self.pool4(drop4) - - conv5 = self.conv5(pool4) - drop5 = self.drop5(conv5) - - up6 = self.up6(drop5) - conv6 = self.conv6(torch.cat([drop4, up6], 1)) - - up7 = self.up7(conv6) - conv7 = self.conv7(torch.cat([conv3, up7], 1)) - - up8 = self.up8(conv7) - conv8 = self.conv8(torch.cat([conv2, up8], 1)) - - up9 = self.up9(conv8) - conv9 = self.conv9(torch.cat([conv1, up9], 1)) - return conv9, input, warped_mask, rx, ry, cx, cy - - -class Unet(nn.Module): - def __init__(self, input_nc, output_nc=3): - super(Unet, self).__init__() - self.stn = STNNet() - nl = nn.InstanceNorm2d - self.conv1 = nn.Sequential(*[nn.Conv2d(input_nc, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU(), - nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU()]) - self.pool1 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv2 = nn.Sequential(*[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU(), - nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU()]) - self.pool2 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv3 = nn.Sequential(*[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU()]) - self.pool3 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv4 = nn.Sequential(*[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU(), - nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU()]) - self.drop4 = nn.Dropout(0.5) - self.pool4 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv5 = nn.Sequential(*[nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1), nl(1024), nn.ReLU(), - nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1), nl(1024), nn.ReLU()]) - self.drop5 = nn.Dropout(0.5) - - self.up6 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), nl(512), - nn.ReLU()]) - - self.conv6 = nn.Sequential(*[nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU(), - nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU()]) - self.up7 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1), nl(256), - nn.ReLU()]) - self.conv7 = nn.Sequential(*[nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU()]) - - self.up8 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1), nl(128), - nn.ReLU()]) - - self.conv8 = nn.Sequential(*[nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU(), - nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), 
nl(128), nn.ReLU()]) - - self.up9 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1), nl(64), - nn.ReLU()]) - - self.conv9 = nn.Sequential(*[nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU(), - nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nl( - 64), nn.ReLU(), - nn.Conv2d( - 64, output_nc, kernel_size=3, stride=1, padding=1) - ]) - - def forward(self, input, refer, mask): - input, warped_mask, rx, ry, cx, cy = self.stn( - input, torch.cat([mask, refer, input], 1), mask) - # ipdb.set_trace() - conv1 = self.conv1(torch.cat([refer.detach(), input.detach()], 1)) - pool1 = self.pool1(conv1) - - conv2 = self.conv2(pool1) - pool2 = self.pool2(conv2) - - conv3 = self.conv3(pool2) - pool3 = self.pool3(conv3) - - conv4 = self.conv4(pool3) - drop4 = self.drop4(conv4) - pool4 = self.pool4(drop4) - - conv5 = self.conv5(pool4) - drop5 = self.drop5(conv5) - - up6 = self.up6(drop5) - conv6 = self.conv6(torch.cat([drop4, up6], 1)) - - up7 = self.up7(conv6) - conv7 = self.conv7(torch.cat([conv3, up7], 1)) - - up8 = self.up8(conv7) - conv8 = self.conv8(torch.cat([conv2, up8], 1)) - - up9 = self.up9(conv8) - conv9 = self.conv9(torch.cat([conv1, up9], 1)) - return conv9, input, warped_mask, rx, ry, cx, cy - - def refine(self, input): - conv1 = self.conv1(input) - pool1 = self.pool1(conv1) - - conv2 = self.conv2(pool1) - pool2 = self.pool2(conv2) - - conv3 = self.conv3(pool2) - pool3 = self.pool3(conv3) - - conv4 = self.conv4(pool3) - drop4 = self.drop4(conv4) - pool4 = self.pool4(drop4) - - conv5 = self.conv5(pool4) - drop5 = self.drop5(conv5) - - up6 = self.up6(drop5) - conv6 = self.conv6(torch.cat([drop4, up6], 1)) - - up7 = self.up7(conv6) - conv7 = self.conv7(torch.cat([conv3, up7], 1)) - - up8 = self.up8(conv7) - conv8 = self.conv8(torch.cat([conv2, up8], 1)) - - up9 = self.up9(conv8) - conv9 = self.conv9(torch.cat([conv1, up9], 1)) - return conv9 - - -class Refine(nn.Module): - def __init__(self, input_nc, output_nc=3): - super(Refine, self).__init__() - nl = nn.InstanceNorm2d - self.conv1 = nn.Sequential(*[nn.Conv2d(input_nc, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU(), - nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU()]) - self.pool1 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv2 = nn.Sequential(*[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU(), - nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU()]) - self.pool2 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv3 = nn.Sequential(*[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU()]) - self.pool3 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv4 = nn.Sequential(*[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU(), - nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU()]) - self.drop4 = nn.Dropout(0.5) - self.pool4 = nn.MaxPool2d(kernel_size=(2, 2)) - - self.conv5 = nn.Sequential(*[nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=1), nl(1024), nn.ReLU(), - nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1), nl(1024), nn.ReLU()]) - self.drop5 = nn.Dropout(0.5) - - self.up6 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), nl(512), - nn.ReLU()]) - - self.conv6 = nn.Sequential(*[nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), 
nl(512), nn.ReLU(), - nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1), nl(512), nn.ReLU()]) - self.up7 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1), nl(256), - nn.ReLU()]) - self.conv7 = nn.Sequential(*[nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU(), - nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), nl(256), nn.ReLU()]) - - self.up8 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1), nl(128), - nn.ReLU()]) - - self.conv8 = nn.Sequential(*[nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU(), - nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1), nl(128), nn.ReLU()]) - - self.up9 = nn.Sequential( - *[nn.UpsamplingNearest2d(scale_factor=2), nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1), nl(64), - nn.ReLU()]) - - self.conv9 = nn.Sequential(*[nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1), nl(64), nn.ReLU(), - nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nl( - 64), nn.ReLU(), - nn.Conv2d( - 64, output_nc, kernel_size=3, stride=1, padding=1) - ]) - - def refine(self, input): - conv1 = self.conv1(input) - pool1 = self.pool1(conv1) - - conv2 = self.conv2(pool1) - pool2 = self.pool2(conv2) - - conv3 = self.conv3(pool2) - pool3 = self.pool3(conv3) - - conv4 = self.conv4(pool3) - drop4 = self.drop4(conv4) - pool4 = self.pool4(drop4) - - conv5 = self.conv5(pool4) - drop5 = self.drop5(conv5) - - up6 = self.up6(drop5) - conv6 = self.conv6(torch.cat([drop4, up6], 1)) - - up7 = self.up7(conv6) - conv7 = self.conv7(torch.cat([conv3, up7], 1)) - - up8 = self.up8(conv7) - conv8 = self.conv8(torch.cat([conv2, up8], 1)) - - up9 = self.up9(conv8) - conv9 = self.conv9(torch.cat([conv1, up9], 1)) - return conv9 - - -class GlobalGenerator(nn.Module): - def __init__(self, input_nc, output_nc, L, S, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - padding_type='reflect'): - assert (n_blocks >= 0) - super(GlobalGenerator, self).__init__() - activation = nn.ReLU(True) - - model = [nn.ReflectionPad2d(3), nn.Conv2d( - input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] - # downsample - for i in range(n_downsampling): - mult = 2 ** i - model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1), - norm_layer(ngf * mult * 2), activation] - - # resnet blocks - mult = 2 ** n_downsampling - for i in range(n_blocks): - model += [ResnetBlock(ngf * mult, norm_type='adain', - padding_type=padding_type)] - # upsample - for i in range(n_downsampling): - mult = 2 ** (n_downsampling - i) - model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, - output_padding=1), - norm_layer(int(ngf * mult / 2)), activation] - model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, - output_nc, kernel_size=7, padding=0)] - self.model = nn.Sequential(*model) - - # style encoder - self.enc_style = StyleEncoder(5, S, 16, self.get_num_adain_params(self.model), norm='none', activ='relu', - pad_type='reflect') - # label encoder - self.enc_label = LabelEncoder( - 5, L, 16, 64, norm='none', activ='relu', pad_type='reflect') - - def assign_adain_params(self, adain_params, model): - # assign the adain_params to the AdaIN layers in model - for m in model.modules(): - if m.__class__.__name__ == "AdaptiveInstanceNorm2d": - mean = adain_params[:, :m.num_features] - std = adain_params[:, m.num_features:2 * m.num_features] - m.bias = 
mean.contiguous().view(-1) - m.weight = std.contiguous().view(-1) - if adain_params.size(1) > 2 * m.num_features: - adain_params = adain_params[:, 2 * m.num_features:] - - def get_num_adain_params(self, model): - # return the number of AdaIN parameters needed by the model - num_adain_params = 0 - for m in model.modules(): - if m.__class__.__name__ == "AdaptiveInstanceNorm2d": - num_adain_params += 2 * m.num_features - return num_adain_params - - def forward(self, input, input_ref, image_ref): - fea1, fea2 = self.enc_label(input_ref) - adain_params = self.enc_style((image_ref, fea1, fea2)) - self.assign_adain_params(adain_params, self.model) - return self.model(input) - - -class BlendGenerator(nn.Module): - def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=3, norm_layer=nn.BatchNorm2d, - padding_type='reflect'): - assert (n_blocks >= 0) - super(BlendGenerator, self).__init__() - activation = nn.ReLU(True) - - model = [nn.ReflectionPad2d(3), nn.Conv2d( - input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] - # downsample - for i in range(n_downsampling): - mult = 2 ** i - model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1), - norm_layer(ngf * mult * 2), activation] - - # resnet blocks - mult = 2 ** n_downsampling - for i in range(n_blocks): - model += [ResnetBlock(ngf * mult, norm_type='in', - padding_type=padding_type)] - - # upsample - for i in range(n_downsampling): - mult = 2 ** (n_downsampling - i) - model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, - output_padding=1), - norm_layer(int(ngf * mult / 2)), activation] - model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, - output_nc, kernel_size=7, padding=0), nn.Sigmoid()] - self.model = nn.Sequential(*model) - - def forward(self, input1, input2): - m = self.model(torch.cat([input1, input2], 1)) - return input1 * m + input2 * (1 - m), m - - # Define the Multiscale Discriminator. - - -class MultiscaleDiscriminator(nn.Module): - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, - use_sigmoid=False, num_D=3, getIntermFeat=False): - super(MultiscaleDiscriminator, self).__init__() - self.num_D = num_D - self.n_layers = n_layers - self.getIntermFeat = getIntermFeat - - for i in range(num_D): - netD = NLayerDiscriminator( - input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat) - if getIntermFeat: - for j in range(n_layers + 2): - setattr(self, 'scale' + str(i) + '_layer' + - str(j), getattr(netD, 'model' + str(j))) - else: - setattr(self, 'layer' + str(i), netD.model) - - self.downsample = nn.AvgPool2d( - 3, stride=2, padding=[1, 1], count_include_pad=False) - - def singleD_forward(self, model, input): - if self.getIntermFeat: - result = [input] - for i in range(len(model)): - result.append(model[i](result[-1])) - return result[1:] - else: - return [model(input)] - - def forward(self, input): - num_D = self.num_D - result = [] - input_downsampled = input - for i in range(num_D): - if self.getIntermFeat: - model = [getattr(self, 'scale' + str(num_D - 1 - i) + '_layer' + str(j)) for j in - range(self.n_layers + 2)] - else: - model = getattr(self, 'layer' + str(num_D - 1 - i)) - result.append(self.singleD_forward(model, input_downsampled)) - if i != (num_D - 1): - input_downsampled = self.downsample(input_downsampled) - return result - - -# Define the PatchGAN discriminator with the specified arguments. 
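-# With the default kw=4 and n_layers=3 this is the usual 70x70 PatchGAN: each
-# value of the output map classifies one overlapping input patch as real or
-# fake.  MultiscaleDiscriminator above runs num_D copies of it on progressively
-# average-pooled versions of the input.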
-class NLayerDiscriminator(nn.Module): - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False): - super(NLayerDiscriminator, self).__init__() - self.getIntermFeat = getIntermFeat - self.n_layers = n_layers - - kw = 4 - padw = int(np.ceil((kw - 1.0) / 2)) - sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, - stride=2, padding=padw), nn.LeakyReLU(0.2, True)]] - - nf = ndf - for n in range(1, n_layers): - nf_prev = nf - nf = min(nf * 2, 512) - sequence += [[ - nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw), - norm_layer(nf), nn.LeakyReLU(0.2, True) - ]] - - nf_prev = nf - nf = min(nf * 2, 512) - sequence += [[ - nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw), - norm_layer(nf), - nn.LeakyReLU(0.2, True) - ]] - - sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, - stride=1, padding=padw)]] - - if use_sigmoid: - sequence += [[nn.Sigmoid()]] - - if getIntermFeat: - for n in range(len(sequence)): - setattr(self, 'model' + str(n), nn.Sequential(*sequence[n])) - else: - sequence_stream = [] - for n in range(len(sequence)): - sequence_stream += sequence[n] - self.model = nn.Sequential(*sequence_stream) - - def forward(self, input): - if self.getIntermFeat: - res = [input] - for n in range(self.n_layers + 2): - model = getattr(self, 'model' + str(n)) - res.append(model(res[-1])) - return res[1:] - else: - return self.model(input) - - -class Vgg19(torch.nn.Module): - def __init__(self, requires_grad=False): - super(Vgg19, self).__init__() - vgg = models.vgg19(pretrained=False) - vgg.load_state_dict(torch.load(os.path.dirname( - os.path.realpath(__file__)) + "/vgg19-dcbb9e9d.pth")) - vgg_pretrained_features = vgg.features - self.vgg = vgg - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - for x in range(2): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(2, 7): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(7, 12): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(12, 21): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(21, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h_relu1 = self.slice1(X) - h_relu2 = self.slice2(h_relu1) - h_relu3 = self.slice3(h_relu2) - h_relu4 = self.slice4(h_relu3) - h_relu5 = self.slice5(h_relu4) - out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] - return out - - def extract(self, x): - x = self.vgg.features(x) - x = self.vgg.avgpool(x) - return x - - -# Define the MaskVAE -class VAE(nn.Module): - def __init__(self, nc, ngf, ndf, latent_variable_size): - super(VAE, self).__init__() - # self.cuda = True - self.nc = nc - self.ngf = ngf - self.ndf = ndf - self.latent_variable_size = latent_variable_size - - # encoder - self.e1 = nn.Conv2d(nc, ndf, 4, 2, 1) - self.bn1 = nn.BatchNorm2d(ndf) - - self.e2 = nn.Conv2d(ndf, ndf * 2, 4, 2, 1) - self.bn2 = nn.BatchNorm2d(ndf * 2) - - self.e3 = nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1) - self.bn3 = nn.BatchNorm2d(ndf * 4) - - self.e4 = nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1) - self.bn4 = nn.BatchNorm2d(ndf * 8) - - self.e5 = nn.Conv2d(ndf * 8, ndf * 16, 4, 2, 1) - self.bn5 = nn.BatchNorm2d(ndf * 16) - - self.e6 = nn.Conv2d(ndf * 16, ndf * 32, 4, 2, 1) - self.bn6 
= nn.BatchNorm2d(ndf * 32) - - self.e7 = nn.Conv2d(ndf * 32, ndf * 64, 4, 2, 1) - self.bn7 = nn.BatchNorm2d(ndf * 64) - - self.fc1 = nn.Linear(ndf * 64 * 4 * 4, latent_variable_size) - self.fc2 = nn.Linear(ndf * 64 * 4 * 4, latent_variable_size) - - # decoder - self.d1 = nn.Linear(latent_variable_size, ngf * 64 * 4 * 4) - - self.up1 = nn.UpsamplingNearest2d(scale_factor=2) - self.pd1 = nn.ReplicationPad2d(1) - self.d2 = nn.Conv2d(ngf * 64, ngf * 32, 3, 1) - self.bn8 = nn.BatchNorm2d(ngf * 32, 1.e-3) - - self.up2 = nn.UpsamplingNearest2d(scale_factor=2) - self.pd2 = nn.ReplicationPad2d(1) - self.d3 = nn.Conv2d(ngf * 32, ngf * 16, 3, 1) - self.bn9 = nn.BatchNorm2d(ngf * 16, 1.e-3) - - self.up3 = nn.UpsamplingNearest2d(scale_factor=2) - self.pd3 = nn.ReplicationPad2d(1) - self.d4 = nn.Conv2d(ngf * 16, ngf * 8, 3, 1) - self.bn10 = nn.BatchNorm2d(ngf * 8, 1.e-3) - - self.up4 = nn.UpsamplingNearest2d(scale_factor=2) - self.pd4 = nn.ReplicationPad2d(1) - self.d5 = nn.Conv2d(ngf * 8, ngf * 4, 3, 1) - self.bn11 = nn.BatchNorm2d(ngf * 4, 1.e-3) - - self.up5 = nn.UpsamplingNearest2d(scale_factor=2) - self.pd5 = nn.ReplicationPad2d(1) - self.d6 = nn.Conv2d(ngf * 4, ngf * 2, 3, 1) - self.bn12 = nn.BatchNorm2d(ngf * 2, 1.e-3) - - self.up6 = nn.UpsamplingNearest2d(scale_factor=2) - self.pd6 = nn.ReplicationPad2d(1) - self.d7 = nn.Conv2d(ngf * 2, ngf, 3, 1) - self.bn13 = nn.BatchNorm2d(ngf, 1.e-3) - - self.up7 = nn.UpsamplingNearest2d(scale_factor=2) - self.pd7 = nn.ReplicationPad2d(1) - self.d8 = nn.Conv2d(ngf, nc, 3, 1) - - self.leakyrelu = nn.LeakyReLU(0.2) - self.relu = nn.ReLU() - # self.sigmoid = nn.Sigmoid() - self.maxpool = nn.MaxPool2d((2, 2), (2, 2)) - - def encode(self, x): - h1 = self.leakyrelu(self.bn1(self.e1(x))) - h2 = self.leakyrelu(self.bn2(self.e2(h1))) - h3 = self.leakyrelu(self.bn3(self.e3(h2))) - h4 = self.leakyrelu(self.bn4(self.e4(h3))) - h5 = self.leakyrelu(self.bn5(self.e5(h4))) - h6 = self.leakyrelu(self.bn6(self.e6(h5))) - h7 = self.leakyrelu(self.bn7(self.e7(h6))) - h7 = h7.view(-1, self.ndf * 64 * 4 * 4) - return self.fc1(h7), self.fc2(h7) - - def reparametrize(self, mu, logvar): - std = logvar.mul(0.5).exp_() - # if self.cuda: - eps = torch.cuda.FloatTensor(std.size()).normal_() - # else: - # eps = torch.FloatTensor(std.size()).normal_() - eps = Variable(eps) - return eps.mul(std).add_(mu) - - def decode(self, z): - h1 = self.relu(self.d1(z)) - h1 = h1.view(-1, self.ngf * 64, 4, 4) - h2 = self.leakyrelu(self.bn8(self.d2(self.pd1(self.up1(h1))))) - h3 = self.leakyrelu(self.bn9(self.d3(self.pd2(self.up2(h2))))) - h4 = self.leakyrelu(self.bn10(self.d4(self.pd3(self.up3(h3))))) - h5 = self.leakyrelu(self.bn11(self.d5(self.pd4(self.up4(h4))))) - h6 = self.leakyrelu(self.bn12(self.d6(self.pd5(self.up5(h5))))) - h7 = self.leakyrelu(self.bn13(self.d7(self.pd6(self.up6(h6))))) - return self.d8(self.pd7(self.up7(h7))) - - def get_latent_var(self, x): - mu, logvar = self.encode(x) - z = self.reparametrize(mu, logvar) - return z, mu, logvar.mul(0.5).exp_() - - def forward(self, x): - mu, logvar = self.encode(x) - z = self.reparametrize(mu, logvar) - res = self.decode(z) - - return res, x, mu, logvar - - -# style encode part -class StyleEncoder(nn.Module): - def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, pad_type): - super(StyleEncoder, self).__init__() - self.model = [] - self.model_middle = [] - self.model_last = [] - self.model += [ConvBlock(input_dim, dim, 7, 1, 3, - norm=norm, activation=activ, pad_type=pad_type)] - for i in range(2): - self.model += 
[ConvBlock(dim, 2 * dim, 4, 2, 1, - norm=norm, activation=activ, pad_type=pad_type)] - dim *= 2 - for i in range(n_downsample - 2): - self.model_middle += [ConvBlock(dim, dim, 4, 2, 1, - norm=norm, activation=activ, pad_type=pad_type)] - self.model_last += [nn.AdaptiveAvgPool2d(1)] # global average pooling - self.model_last += [nn.Conv2d(dim, style_dim, 1, 1, 0)] - - self.model = nn.Sequential(*self.model) - self.model_middle = nn.Sequential(*self.model_middle) - self.model_last = nn.Sequential(*self.model_last) - - self.output_dim = dim - - self.sft1 = SFTLayer() - self.sft2 = SFTLayer() - - def forward(self, x): - fea = self.model(x[0]) - fea = self.sft1((fea, x[1])) - fea = self.model_middle(fea) - fea = self.sft2((fea, x[2])) - return self.model_last(fea) - - -# label encode part -class LabelEncoder(nn.Module): - def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, pad_type): - super(LabelEncoder, self).__init__() - self.model = [] - self.model_last = [nn.ReLU()] - self.model += [ConvBlock(input_dim, dim, 7, 1, 3, - norm=norm, activation=activ, pad_type=pad_type)] - self.model += [ConvBlock(dim, 2 * dim, 4, 2, 1, - norm=norm, activation=activ, pad_type=pad_type)] - dim *= 2 - self.model += [ConvBlock(dim, 2 * dim, 4, 2, 1, - norm=norm, activation='none', pad_type=pad_type)] - dim *= 2 - for i in range(n_downsample - 3): - self.model_last += [ConvBlock(dim, dim, 4, 2, 1, - norm=norm, activation=activ, pad_type=pad_type)] - self.model_last += [ConvBlock(dim, dim, 4, 2, 1, - norm=norm, activation='none', pad_type=pad_type)] - self.model = nn.Sequential(*self.model) - self.model_last = nn.Sequential(*self.model_last) - self.output_dim = dim - - def forward(self, x): - fea = self.model(x) - return fea, self.model_last(fea) - - -# Define the basic block -class ConvBlock(nn.Module): - def __init__(self, input_dim, output_dim, kernel_size, stride, - padding=0, norm='none', activation='relu', pad_type='zero'): - super(ConvBlock, self).__init__() - self.use_bias = True - # initialize padding - if pad_type == 'reflect': - self.pad = nn.ReflectionPad2d(padding) - elif pad_type == 'replicate': - self.pad = nn.ReplicationPad2d(padding) - elif pad_type == 'zero': - self.pad = nn.ZeroPad2d(padding) - else: - assert 0, "Unsupported padding type: {}".format(pad_type) - - # initialize normalization - norm_dim = output_dim - if norm == 'bn': - self.norm = nn.BatchNorm2d(norm_dim) - elif norm == 'in': - # self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True) - self.norm = nn.InstanceNorm2d(norm_dim) - elif norm == 'ln': - self.norm = LayerNorm(norm_dim) - elif norm == 'adain': - self.norm = AdaptiveInstanceNorm2d(norm_dim) - elif norm == 'none' or norm == 'sn': - self.norm = None - else: - assert 0, "Unsupported normalization: {}".format(norm) - - # initialize activation - if activation == 'relu': - self.activation = nn.ReLU(inplace=True) - elif activation == 'lrelu': - self.activation = nn.LeakyReLU(0.2, inplace=True) - elif activation == 'prelu': - self.activation = nn.PReLU() - elif activation == 'selu': - self.activation = nn.SELU(inplace=True) - elif activation == 'tanh': - self.activation = nn.Tanh() - elif activation == 'none': - self.activation = None - else: - assert 0, "Unsupported activation: {}".format(activation) - - # initialize convolution - if norm == 'sn': - self.conv = SpectralNorm( - nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) - else: - self.conv = nn.Conv2d(input_dim, output_dim, - kernel_size, stride, bias=self.use_bias) - 
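-    # forward: explicit padding first, then the convolution, then the optional
-    # normalisation and activation selected in __init__.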
- def forward(self, x): - x = self.conv(self.pad(x)) - if self.norm: - x = self.norm(x) - if self.activation: - x = self.activation(x) - return x - - -class LinearBlock(nn.Module): - def __init__(self, input_dim, output_dim, norm='none', activation='relu'): - super(LinearBlock, self).__init__() - use_bias = True - # initialize fully connected layer - if norm == 'sn': - self.fc = SpectralNorm( - nn.Linear(input_dim, output_dim, bias=use_bias)) - else: - self.fc = nn.Linear(input_dim, output_dim, bias=use_bias) - - # initialize normalization - norm_dim = output_dim - if norm == 'bn': - self.norm = nn.BatchNorm1d(norm_dim) - elif norm == 'in': - self.norm = nn.InstanceNorm1d(norm_dim) - elif norm == 'ln': - self.norm = LayerNorm(norm_dim) - elif norm == 'none' or norm == 'sn': - self.norm = None - else: - assert 0, "Unsupported normalization: {}".format(norm) - - # initialize activation - if activation == 'relu': - self.activation = nn.ReLU(inplace=True) - elif activation == 'lrelu': - self.activation = nn.LeakyReLU(0.2, inplace=True) - elif activation == 'prelu': - self.activation = nn.PReLU() - elif activation == 'selu': - self.activation = nn.SELU(inplace=True) - elif activation == 'tanh': - self.activation = nn.Tanh() - elif activation == 'none': - self.activation = None - else: - assert 0, "Unsupported activation: {}".format(activation) - - def forward(self, x): - out = self.fc(x) - if self.norm: - out = self.norm(out) - if self.activation: - out = self.activation(out) - return out - - -# Define a resnet block -class ResnetBlock(nn.Module): - def __init__(self, dim, norm_type, padding_type, use_dropout=False): - super(ResnetBlock, self).__init__() - self.conv_block = self.build_conv_block( - dim, norm_type, padding_type, use_dropout) - - def build_conv_block(self, dim, norm_type, padding_type, use_dropout): - conv_block = [] - conv_block += [ConvBlock(dim, dim, 3, 1, 1, norm=norm_type, - activation='relu', pad_type=padding_type)] - conv_block += [ConvBlock(dim, dim, 3, 1, 1, norm=norm_type, - activation='none', pad_type=padding_type)] - - return nn.Sequential(*conv_block) - - def forward(self, x): - out = x + self.conv_block(x) - return out - - -class SFTLayer(nn.Module): - def __init__(self): - super(SFTLayer, self).__init__() - self.SFT_scale_conv1 = nn.Conv2d(64, 64, 1) - self.SFT_scale_conv2 = nn.Conv2d(64, 64, 1) - self.SFT_shift_conv1 = nn.Conv2d(64, 64, 1) - self.SFT_shift_conv2 = nn.Conv2d(64, 64, 1) - - def forward(self, x): - scale = self.SFT_scale_conv2(F.leaky_relu( - self.SFT_scale_conv1(x[1]), 0.1, inplace=True)) - shift = self.SFT_shift_conv2(F.leaky_relu( - self.SFT_shift_conv1(x[1]), 0.1, inplace=True)) - return x[0] * scale + shift - - -class ConvBlock_SFT(nn.Module): - def __init__(self, dim, norm_type, padding_type, use_dropout=False): - super(ResnetBlock_SFT, self).__init__() - self.sft1 = SFTLayer() - self.conv1 = ConvBlock( - dim, dim, 4, 2, 1, norm=norm_type, activation='none', pad_type=padding_type) - - def forward(self, x): - fea = self.sft1((x[0], x[1])) - fea = F.relu(self.conv1(fea), inplace=True) - return (x[0] + fea, x[1]) - - -class ConvBlock_SFT_last(nn.Module): - def __init__(self, dim, norm_type, padding_type, use_dropout=False): - super(ResnetBlock_SFT_last, self).__init__() - self.sft1 = SFTLayer() - self.conv1 = ConvBlock( - dim, dim, 4, 2, 1, norm=norm_type, activation='none', pad_type=padding_type) - - def forward(self, x): - fea = self.sft1((x[0], x[1])) - fea = F.relu(self.conv1(fea), inplace=True) - return x[0] + fea - - -# Definition of 
normalization layer -class AdaptiveInstanceNorm2d(nn.Module): - def __init__(self, num_features, eps=1e-5, momentum=0.1): - super(AdaptiveInstanceNorm2d, self).__init__() - self.num_features = num_features - self.eps = eps - self.momentum = momentum - # weight and bias are dynamically assigned - self.weight = None - self.bias = None - # just dummy buffers, not used - self.register_buffer('running_mean', torch.zeros(num_features)) - self.register_buffer('running_var', torch.ones(num_features)) - - def forward(self, x): - assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!" - b, c = x.size(0), x.size(1) - running_mean = self.running_mean.repeat(b) - running_var = self.running_var.repeat(b) - - # Apply instance norm - x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) - - out = F.batch_norm( - x_reshaped, running_mean, running_var, self.weight, self.bias, - True, self.momentum, self.eps) - - return out.view(b, c, *x.size()[2:]) - - def __repr__(self): - return self.__class__.__name__ + '(' + str(self.num_features) + ')' - - -class LayerNorm(nn.Module): - def __init__(self, num_features, eps=1e-5, affine=True): - super(LayerNorm, self).__init__() - self.num_features = num_features - self.affine = affine - self.eps = eps - - if self.affine: - self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) - self.beta = nn.Parameter(torch.zeros(num_features)) - - def forward(self, x): - shape = [-1] + [1] * (x.dim() - 1) - # print(x.size()) - if x.size(0) == 1: - # These two lines run much faster in pytorch 0.4 than the two lines listed below. - mean = x.view(-1).mean().view(*shape) - std = x.view(-1).std().view(*shape) - else: - mean = x.view(x.size(0), -1).mean(1).view(*shape) - std = x.view(x.size(0), -1).std(1).view(*shape) - - x = (x - mean) / (std + self.eps) - - if self.affine: - shape = [1, -1] + [1] * (x.dim() - 2) - x = x * self.gamma.view(*shape) + self.beta.view(*shape) - return x - - -def l2normalize(v, eps=1e-12): - return v / (v.norm() + eps) - - -class SpectralNorm(nn.Module): - """ - Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida - and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan - """ - - def __init__(self, module, name='weight', power_iterations=1): - super(SpectralNorm, self).__init__() - self.module = module - self.name = name - self.power_iterations = power_iterations - if not self._made_params(): - self._make_params() - - def _update_u_v(self): - u = getattr(self.module, self.name + "_u") - v = getattr(self.module, self.name + "_v") - w = getattr(self.module, self.name + "_bar") - - height = w.data.shape[0] - for _ in range(self.power_iterations): - v.data = l2normalize( - torch.mv(torch.t(w.view(height, -1).data), u.data)) - u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data)) - - # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data)) - sigma = u.dot(w.view(height, -1).mv(v)) - setattr(self.module, self.name, w / sigma.expand_as(w)) - - def _made_params(self): - try: - u = getattr(self.module, self.name + "_u") - v = getattr(self.module, self.name + "_v") - w = getattr(self.module, self.name + "_bar") - return True - except AttributeError: - return False - - def _make_params(self): - w = getattr(self.module, self.name) - - height = w.data.shape[0] - width = w.view(height, -1).data.shape[1] - - u = 
nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False) - v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False) - u.data = l2normalize(u.data) - v.data = l2normalize(v.data) - w_bar = nn.Parameter(w.data) - - del self.module._parameters[self.name] - - self.module.register_parameter(self.name + "_u", u) - self.module.register_parameter(self.name + "_v", v) - self.module.register_parameter(self.name + "_bar", w_bar) - - def forward(self, *args): - self._update_u_v() - return self.module.forward(*args) - - -# STN TPS - -class CNN(nn.Module): - def __init__(self, num_output, input_nc=5, ngf=8, n_layers=5, norm_layer=nn.InstanceNorm2d, use_dropout=False): - super(CNN, self).__init__() - downconv = nn.Conv2d(5, ngf, kernel_size=4, stride=2, padding=1) - model = [downconv, nn.ReLU(True), norm_layer(ngf)] - for i in range(n_layers): - in_ngf = 2 ** i * ngf if 2 ** i * ngf < 1024 else 1024 - out_ngf = 2 ** (i + 1) * ngf if 2 ** i * ngf < 1024 else 1024 - downconv = nn.Conv2d( - in_ngf, out_ngf, kernel_size=4, stride=2, padding=1) - model += [downconv, norm_layer(out_ngf), nn.ReLU(True)] - model += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), - norm_layer(64), nn.ReLU(True)] - model += [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1), - norm_layer(64), nn.ReLU(True)] - self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) - self.model = nn.Sequential(*model) - self.fc1 = nn.Linear(512, 128) - self.fc2 = nn.Linear(128, num_output) - - def forward(self, x): - x = self.model(x) - x = self.maxpool(x) - x = x.view(x.shape[0], -1) - x = F.relu(self.fc1(x)) - x = F.dropout(x, training=self.training) - x = self.fc2(x) - - return x - - -class ClsNet(nn.Module): - - def __init__(self): - super(ClsNet, self).__init__() - self.cnn = CNN(10) - - def forward(self, x): - return F.log_softmax(self.cnn(x)) - - -class BoundedGridLocNet(nn.Module): - - def __init__(self, grid_height, grid_width, target_control_points): - super(BoundedGridLocNet, self).__init__() - self.cnn = CNN(grid_height * grid_width * 2) - - bias = torch.from_numpy(np.arctanh(target_control_points.numpy())) - bias = bias.view(-1) - self.cnn.fc2.bias.data.copy_(bias) - self.cnn.fc2.weight.data.zero_() - - def forward(self, x): - batch_size = x.size(0) - points = F.tanh(self.cnn(x)) - # ipdb.set_trace() - coor = points.view(batch_size, -1, 2) - row = self.get_row(coor, 5) - col = self.get_col(coor, 5) - rx, ry, cx, cy = torch.tensor(0.08).cuda(), torch.tensor( - 0.08).cuda(), torch.tensor(0.08).cuda(), torch.tensor(0.08).cuda() - row_x, row_y = row[:, :, 0], row[:, :, 1] - col_x, col_y = col[:, :, 0], col[:, :, 1] - rx_loss = torch.max(rx, row_x).mean() - ry_loss = torch.max(ry, row_y).mean() - cx_loss = torch.max(cx, col_x).mean() - cy_loss = torch.max(cy, col_y).mean() - - return coor, rx_loss, ry_loss, cx_loss, cy_loss - - def get_row(self, coor, num): - sec_dic = [] - for j in range(num): - sum = 0 - buffer = 0 - flag = False - max = -1 - for i in range(num-1): - differ = (coor[:, j*num+i+1, :]-coor[:, j*num+i, :])**2 - if not flag: - second_dif = 0 - flag = True - else: - second_dif = torch.abs(differ-buffer) - sec_dic.append(second_dif) - - buffer = differ - sum += second_dif - return torch.stack(sec_dic, dim=1) - - def get_col(self, coor, num): - sec_dic = [] - for i in range(num): - sum = 0 - buffer = 0 - flag = False - max = -1 - for j in range(num - 1): - differ = (coor[:, (j+1) * num + i, :] - - coor[:, j * num + i, :]) ** 2 - if not flag: - second_dif = 0 - flag = True - else: - second_dif 
= torch.abs(differ-buffer) - sec_dic.append(second_dif) - buffer = differ - sum += second_dif - return torch.stack(sec_dic, dim=1) - - -class UnBoundedGridLocNet(nn.Module): - - def __init__(self, grid_height, grid_width, target_control_points): - super(UnBoundedGridLocNet, self).__init__() - self.cnn = CNN(grid_height * grid_width * 2) - - bias = target_control_points.view(-1) - self.cnn.fc2.bias.data.copy_(bias) - self.cnn.fc2.weight.data.zero_() - - def forward(self, x): - batch_size = x.size(0) - points = self.cnn(x) - return points.view(batch_size, -1, 2) - - -class STNNet(nn.Module): - - def __init__(self): - super(STNNet, self).__init__() - range = 0.9 - r1 = range - r2 = range - grid_size_h = 5 - grid_size_w = 5 - - assert r1 < 1 and r2 < 1 # if >= 1, arctanh will cause error in BoundedGridLocNet - target_control_points = torch.Tensor(list(itertools.product( - np.arange(-r1, r1 + 0.00001, 2.0 * r1 / (grid_size_h - 1)), - np.arange(-r2, r2 + 0.00001, 2.0 * r2 / (grid_size_w - 1)), - ))) - # ipdb.set_trace() - Y, X = target_control_points.split(1, dim=1) - target_control_points = torch.cat([X, Y], dim=1) - # self.get_row(target_control_points,5) - GridLocNet = { - 'unbounded_stn': UnBoundedGridLocNet, - 'bounded_stn': BoundedGridLocNet, - }['bounded_stn'] - self.loc_net = GridLocNet( - grid_size_h, grid_size_w, target_control_points) - - self.tps = TPSGridGen(256, 192, target_control_points) - - def get_row(self, coor, num): - for j in range(num): - sum = 0 - buffer = 0 - flag = False - max = -1 - for i in range(num - 1): - differ = (coor[j * num + i + 1, :] - coor[j * num + i, :]) ** 2 - if not flag: - second_dif = 0 - flag = True - else: - second_dif = torch.abs(differ - buffer) - - buffer = differ - sum += second_dif - print(sum / num) - - def get_col(self, coor, num): - for i in range(num): - sum = 0 - buffer = 0 - flag = False - max = -1 - for j in range(num - 1): - differ = (coor[(j + 1) * num + i, :] - - coor[j * num + i, :]) ** 2 - if not flag: - second_dif = 0 - flag = True - else: - second_dif = torch.abs(differ-buffer) - - buffer = differ - sum += second_dif - print(sum) - - def forward(self, x, reference, mask): - batch_size = x.size(0) - source_control_points, rx, ry, cx, cy = self.loc_net(reference) - source_control_points = (source_control_points) - # print('control points',source_control_points.shape) - source_coordinate = self.tps(source_control_points) - grid = source_coordinate.view(batch_size, 256, 192, 2) - # print('grid size',grid.shape) - transformed_x = grid_sample(x, grid, canvas=0) - warped_mask = grid_sample(mask, grid, canvas=0) - return transformed_x, warped_mask, rx, ry, cx, cy diff --git a/spaces/hdhzk/bingo/cloudflare/worker.js b/spaces/hdhzk/bingo/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/hdhzk/bingo/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/hebert2099/MusicGen/tests/common_utils/wav_utils.py b/spaces/hebert2099/MusicGen/tests/common_utils/wav_utils.py deleted file mode 100644 index 
d3a563ee1749a58217ece55c9a08b8d93c0fc386..0000000000000000000000000000000000000000 --- a/spaces/hebert2099/MusicGen/tests/common_utils/wav_utils.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from pathlib import Path -import typing as tp - -import torch -import torchaudio - - -def get_white_noise(chs: int = 1, num_frames: int = 1): - wav = torch.randn(chs, num_frames) - return wav - - -def get_batch_white_noise(bs: int = 1, chs: int = 1, num_frames: int = 1): - wav = torch.randn(bs, chs, num_frames) - return wav - - -def save_wav(path: str, wav: torch.Tensor, sample_rate: int): - fp = Path(path) - kwargs: tp.Dict[str, tp.Any] = {} - if fp.suffix == '.wav': - kwargs['encoding'] = 'PCM_S' - kwargs['bits_per_sample'] = 16 - elif fp.suffix == '.mp3': - kwargs['compression'] = 320 - torchaudio.save(str(fp), wav, sample_rate, **kwargs) diff --git a/spaces/hf4all/web-ui/_next/static/chunks/c6a0d165.06a9e7e9a00c85b9.js b/spaces/hf4all/web-ui/_next/static/chunks/c6a0d165.06a9e7e9a00c85b9.js deleted file mode 100644 index 4b6f0e0dae825086e31eaea629cdc539648b9a34..0000000000000000000000000000000000000000 --- a/spaces/hf4all/web-ui/_next/static/chunks/c6a0d165.06a9e7e9a00c85b9.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[130],{9735:function(a,t,n){n.d(t,{Zg$:function(){return h}});var c=n(83270);function h(a){return(0,c.w_)({tag:"svg",attr:{viewBox:"0 0 24 24"},child:[{tag:"path",attr:{d:"M0 3.75A.75.75 0 0 1 .75 3h7.497c1.566 0 2.945.8 3.751 2.014A4.495 4.495 0 0 1 15.75 3h7.5a.75.75 0 0 1 .75.75v15.063a.752.752 0 0 1-.755.75l-7.682-.052a3 3 0 0 0-2.142.878l-.89.891a.75.75 0 0 1-1.061 0l-.902-.901a2.996 2.996 0 0 0-2.121-.879H.75a.75.75 0 0 1-.75-.75Zm12.75 15.232a4.503 4.503 0 0 1 2.823-.971l6.927.047V4.5h-6.75a3 3 0 0 0-3 3ZM11.247 7.497a3 3 0 0 0-3-2.997H1.5V18h6.947c1.018 0 2.006.346 2.803.98Z"}}]})(a)}}}]); \ No newline at end of file diff --git a/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/dataset.py b/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/dataset.py deleted file mode 100644 index 2092eb4e4f9aa2c32da1c6f6cd9b0c512989450f..0000000000000000000000000000000000000000 --- a/spaces/hlydecker/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/dataset.py +++ /dev/null @@ -1,740 +0,0 @@ -import time -from dataclasses import dataclass -from datetime import datetime -from functools import reduce -import json -import os -from pathlib import Path -import re -import requests -from requests.models import MissingSchema -import sys -from typing import List, Optional, Tuple, Dict, Callable, Any - -from bs4 import BeautifulSoup -import docx -from html2text import html2text -import langchain -from langchain.callbacks import get_openai_callback -from langchain.cache import SQLiteCache -from langchain.chains import LLMChain -from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT -from langchain.chat_models import ChatOpenAI -from langchain.chat_models.base import BaseChatModel -from langchain.document_loaders import PyPDFLoader, PyMuPDFLoader -from langchain.embeddings.base import Embeddings -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.llms import OpenAI -from langchain.llms.base import LLM, BaseLLM -from langchain.prompts.chat import 
AIMessagePromptTemplate -from langchain.text_splitter import TokenTextSplitter, RecursiveCharacterTextSplitter -from langchain.vectorstores import Pinecone as OriginalPinecone -import numpy as np -import openai -import pinecone -from pptx import Presentation -from pypdf import PdfReader -import trafilatura - -from streamlit_langchain_chat.constants import * -from streamlit_langchain_chat.customized_langchain.vectorstores import FAISS -from streamlit_langchain_chat.customized_langchain.vectorstores import Pinecone -from streamlit_langchain_chat.utils import maybe_is_text, maybe_is_truncated -from streamlit_langchain_chat.prompts import * - - -if REUSE_ANSWERS: - CACHE_PATH = TEMP_DIR / "llm_cache.db" - os.makedirs(os.path.dirname(CACHE_PATH), exist_ok=True) - langchain.llm_cache = SQLiteCache(str(CACHE_PATH)) - -# option 1 -TextSplitter = TokenTextSplitter -# option 2 -# TextSplitter = RecursiveCharacterTextSplitter # usado por gpt4_pdf_chatbot_langchain (aka GPCL) - - -@dataclass -class Answer: - """A class to hold the answer to a question.""" - question: str = "" - answer: str = "" - context: str = "" - chunks: str = "" - packages: List[Any] = None - references: str = "" - cost_str: str = "" - passages: Dict[str, str] = None - tokens: List[Dict] = None - - def __post_init__(self): - """Initialize the answer.""" - if self.packages is None: - self.packages = [] - if self.passages is None: - self.passages = {} - - def __str__(self) -> str: - """Return the answer as a string.""" - return self.answer - - -def parse_docx(path, citation, key, chunk_chars=2000, overlap=50): - try: - document = docx.Document(path) - fullText = [] - for paragraph in document.paragraphs: - fullText.append(paragraph.text) - doc = '\n'.join(fullText) + '\n' - except Exception as e: - print(f"code_error: {e}") - sys.exit(1) - - if doc: - text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - texts = text_splitter.split_text(doc) - return texts, [dict(citation=citation, dockey=key, key=key)] * len(texts) - else: - return [], [] - - -# TODO: si pones un conector con el formato loader = ... ; data = loader.load(); -# podrás poner todos los conectores de langchain -# https://langchain.readthedocs.io/en/stable/modules/document_loaders/examples/pdf.html -def parse_pdf(path, citation, key, chunk_chars=2000, overlap=50): - pdfFileObj = open(path, "rb") - pdfReader = PdfReader(pdfFileObj) - splits = [] - split = "" - pages = [] - metadatas = [] - for i, page in enumerate(pdfReader.pages): - split += page.extract_text() - pages.append(str(i + 1)) - # split could be so long it needs to be split - # into multiple chunks. Or it could be so short - # that it needs to be combined with the next chunk. - while len(split) > chunk_chars: - splits.append(split[:chunk_chars]) - # pretty formatting of pages (e.g. 1-3, 4, 5-7) - pg = "-".join([pages[0], pages[-1]]) - metadatas.append( - dict( - citation=citation, - dockey=key, - key=f"{key} pages {pg}", - ) - ) - split = split[chunk_chars - overlap:] - pages = [str(i + 1)] - if len(split) > overlap: - splits.append(split[:chunk_chars]) - pg = "-".join([pages[0], pages[-1]]) - metadatas.append( - dict( - citation=citation, - dockey=key, - key=f"{key} pages {pg}", - ) - ) - pdfFileObj.close() - - # # ### option 2. PyPDFLoader - # loader = PyPDFLoader(path) - # data = loader.load_and_split() - # # ### option 2.1. 
PyPDFLoader usado por GPCL, aunque luego usa el - # loader = PyPDFLoader(path) - # rawDocs = loader.load() - # text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - # texts = text_splitter.split_documents(rawDocs) - # # ### option 3. PDFMiner. Este parece la mejor opcion - # loader = PyMuPDFLoader(path) - # data = loader.load() - return splits, metadatas - - -def parse_pptx(path, citation, key, chunk_chars=2000, overlap=50): - try: - presentation = Presentation(path) - fullText = [] - for slide in presentation.slides: - for shape in slide.shapes: - if hasattr(shape, "text"): - fullText.append(shape.text) - doc = ''.join(fullText) - - if doc: - text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - texts = text_splitter.split_text(doc) - return texts, [dict(citation=citation, dockey=key, key=key)] * len(texts) - else: - return [], [] - - except Exception as e: - print(f"code_error: {e}") - sys.exit(1) - - -def parse_txt(path, citation, key, chunk_chars=2000, overlap=50, html=False): - try: - with open(path) as f: - doc = f.read() - except UnicodeDecodeError as e: - with open(path, encoding="utf-8", errors="ignore") as f: - doc = f.read() - if html: - doc = html2text(doc) - # yo, no idea why but the texts are not split correctly - text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - texts = text_splitter.split_text(doc) - return texts, [dict(citation=citation, dockey=key, key=key)] * len(texts) - - -def parse_url(url: str, citation, key, chunk_chars=2000, overlap=50): - def beautifulsoup_extract_text_fallback(response_content): - """ - This is a fallback function, so that we can always return a value for text content. - Even for when both Trafilatura and BeautifulSoup are unable to extract the text from a - single URL. 
- """ - - # Create the beautifulsoup object: - soup = BeautifulSoup(response_content, 'html.parser') - - # Finding the text: - text = soup.find_all(text=True) - - # Remove unwanted tag elements: - cleaned_text = '' - blacklist = [ - '[document]', - 'noscript', - 'header', - 'html', - 'meta', - 'head', - 'input', - 'script', - 'style', ] - - # Then we will loop over every item in the extract text and make sure that the beautifulsoup4 tag - # is NOT in the blacklist - for item in text: - if item.parent.name not in blacklist: - cleaned_text += f'{item} ' # cleaned_text += '{} '.format(item) - - # Remove any tab separation and strip the text: - cleaned_text = cleaned_text.replace('\t', '') - return cleaned_text.strip() - - def extract_text_from_single_web_page(url): - print(f"\n===========\n{url=}\n===========\n") - downloaded_url = trafilatura.fetch_url(url) - a = None - try: - a = trafilatura.extract(downloaded_url, - output_format='json', - with_metadata=True, - include_comments=False, - date_extraction_params={'extensive_search': True, - 'original_date': True}) - except AttributeError: - a = trafilatura.extract(downloaded_url, - output_format='json', - with_metadata=True, - date_extraction_params={'extensive_search': True, - 'original_date': True}) - except Exception as e: - print(f"code_error: {e}") - - if a: - json_output = json.loads(a) - return json_output['text'] - else: - try: - headers = {'User-Agent': 'Chrome/83.0.4103.106'} - resp = requests.get(url, headers=headers) - print(f"{resp=}\n") - # We will only extract the text from successful requests: - if resp.status_code == 200: - return beautifulsoup_extract_text_fallback(resp.content) - else: - # This line will handle for any failures in both the Trafilature and BeautifulSoup4 functions: - return np.nan - # Handling for any URLs that don't have the correct protocol - except MissingSchema: - return np.nan - - text_to_split = extract_text_from_single_web_page(url) - text_splitter = TextSplitter(chunk_size=chunk_chars, chunk_overlap=overlap) - texts = text_splitter.split_text(text_to_split) - return texts, [dict(citation=citation, dockey=key, key=key)] * len(texts) - - -def read_source(path: str = None, - citation: str = None, - key: str = None, - chunk_chars: int = 3000, - overlap: int = 100, - disable_check: bool = False): - if path.endswith(".pdf"): - return parse_pdf(path, citation, key, chunk_chars, overlap) - elif path.endswith(".txt"): - return parse_txt(path, citation, key, chunk_chars, overlap) - elif path.endswith(".html"): - return parse_txt(path, citation, key, chunk_chars, overlap, html=True) - elif path.endswith(".docx"): - return parse_docx(path, citation, key, chunk_chars, overlap) - elif path.endswith(".pptx"): - return parse_pptx(path, citation, key, chunk_chars, overlap) - elif path.startswith("http://") or path.startswith("https://"): - return parse_url(path, citation, key, chunk_chars, overlap) - # TODO: poner mas conectores - # else: - # return parse_code_txt(path, citation, key, chunk_chars, overlap) - else: - raise "unknown extension" - - -class Dataset: - """A collection of documents to be used for answering questions.""" - def __init__( - self, - chunk_size_limit: int = 3000, - llm: Optional[BaseLLM] | Optional[BaseChatModel] = None, - summary_llm: Optional[BaseLLM] = None, - name: str = "default", - index_path: Optional[Path] = None, - ) -> None: - """Initialize the collection of documents. - - Args: - chunk_size_limit: The maximum number of characters to use for a single chunk of text. 
- llm: The language model to use for answering questions. Default - OpenAI chat-gpt-turbo - summary_llm: The language model to use for summarizing documents. If None, llm is used. - name: The name of the collection. - index_path: The path to the index file IF pickled. If None, defaults to using name in $HOME/.paperqa/name - """ - self.docs = dict() - self.keys = set() - self.chunk_size_limit = chunk_size_limit - - self.index_docstore = None - - if llm is None: - llm = ChatOpenAI(temperature=0.1, max_tokens=512) - if summary_llm is None: - summary_llm = llm - self.update_llm(llm, summary_llm) - - if index_path is None: - index_path = TEMP_DIR / name - self.index_path = index_path - self.name = name - - def update_llm(self, llm: BaseLLM | ChatOpenAI, summary_llm: Optional[BaseLLM] = None) -> None: - """Update the LLM for answering questions.""" - self.llm = llm - if summary_llm is None: - summary_llm = llm - self.summary_llm = summary_llm - self.summary_chain = LLMChain(prompt=chat_summary_prompt, llm=summary_llm) - self.search_chain = LLMChain(prompt=search_prompt, llm=llm) - self.cite_chain = LLMChain(prompt=citation_prompt, llm=llm) - - def add( - self, - path: str, - citation: Optional[str] = None, - key: Optional[str] = None, - disable_check: bool = False, - chunk_chars: Optional[int] = 3000, - ) -> None: - """Add a document to the collection.""" - - if path in self.docs: - print(f"Document {path} already in collection.") - return None - - if citation is None: - # peak first chunk - texts, _ = read_source(path, "", "", chunk_chars=chunk_chars) - with get_openai_callback() as cb: - citation = self.cite_chain.run(texts[0]) - if len(citation) < 3 or "Unknown" in citation or "insufficient" in citation: - citation = f"Unknown, {os.path.basename(path)}, {datetime.now().year}" - - if key is None: - # get first name and year from citation - try: - author = re.search(r"([A-Z][a-z]+)", citation).group(1) - except AttributeError: - # panicking - no word?? - raise ValueError( - f"Could not parse key from citation {citation}. Consider just passing key explicitly - e.g. docs.py (path, citation, key='mykey')" - ) - try: - year = re.search(r"(\d{4})", citation).group(1) - except AttributeError: - year = "" - key = f"{author}{year}" - suffix = "" - while key + suffix in self.keys: - # move suffix to next letter - if suffix == "": - suffix = "a" - else: - suffix = chr(ord(suffix) + 1) - key += suffix - self.keys.add(key) - - texts, metadata = read_source(path, citation, key, chunk_chars=chunk_chars) - # loose check to see if document was loaded - # - if len("".join(texts)) < 10 or ( - not disable_check and not maybe_is_text("".join(texts)) - ): - raise ValueError( - f"This does not look like a text document: {path}. Path disable_check to ignore this error." 
- ) - - self.docs[path] = dict(texts=texts, metadata=metadata, key=key) - if self.index_docstore is not None: - self.index_docstore.add_texts(texts, metadatas=metadata) - - def clear(self) -> None: - """Clear the collection of documents.""" - self.docs = dict() - self.keys = set() - self.index_docstore = None - # delete index file - pkl = self.index_path / "index.pkl" - if pkl.exists(): - pkl.unlink() - fs = self.index_path / "index.faiss" - if fs.exists(): - fs.unlink() - - @property - def doc_previews(self) -> List[Tuple[int, str, str]]: - """Return a list of tuples of (key, citation) for each document.""" - return [ - ( - len(doc["texts"]), - doc["metadata"][0]["dockey"], - doc["metadata"][0]["citation"], - ) - for doc in self.docs.values() - ] - - # to pickle, we have to save the index as a file - def __getstate__(self, embedding: Embeddings): - if embedding is None: - embedding = OpenAIEmbeddings() - if self.index_docstore is None and len(self.docs) > 0: - self._build_faiss_index(embedding) - state = self.__dict__.copy() - if self.index_docstore is not None: - state["_index"].save_local(self.index_path) - del state["_index"] - # remove LLMs (they can have callbacks, which can't be pickled) - del state["summary_chain"] - del state["qa_chain"] - del state["cite_chain"] - del state["search_chain"] - return state - - def __setstate__(self, state): - self.__dict__.update(state) - try: - self.index_docstore = FAISS.load_local(self.index_path, OpenAIEmbeddings()) - except: - # they use some special exception type, but I don't want to import it - self.index_docstore = None - self.update_llm( - ChatOpenAI(temperature=0.1, max_tokens=512) - ) - - def _build_faiss_index(self, embedding: Embeddings = None): - if embedding is None: - embedding = OpenAIEmbeddings() - if self.index_docstore is None: - texts = reduce( - lambda x, y: x + y, [doc["texts"] for doc in self.docs.values()], [] - ) - metadatas = reduce( - lambda x, y: x + y, [doc["metadata"] for doc in self.docs.values()], [] - ) - - # if the index exists, load it - if LOAD_INDEX_LOCALLY and (self.index_path / "index.faiss").exists(): - self.index_docstore = FAISS.load_local(self.index_path, embedding) - - # search if the text and metadata already existed in the index - for i in reversed(range(len(texts))): - text = texts[i] - metadata = metadatas[i] - for key, value in self.index_docstore.docstore.dict_.items(): - if value.page_content == text: - if value.metadata.get('citation').split(os.sep)[-1] != metadata.get('citation').split(os.sep)[-1]: - self.index_docstore.docstore.dict_[key].metadata['citation'] = metadata.get('citation').split(os.sep)[-1] - self.index_docstore.docstore.dict_[key].metadata['dockey'] = metadata.get('citation').split(os.sep)[-1] - self.index_docstore.docstore.dict_[key].metadata['key'] = metadata.get('citation').split(os.sep)[-1] - texts.pop(i) - metadatas.pop(i) - - # add remaining texts - if texts: - self.index_docstore.add_texts(texts=texts, metadatas=metadatas) - else: - # crete new index - self.index_docstore = FAISS.from_texts(texts, embedding, metadatas=metadatas) - # - - if SAVE_INDEX_LOCALLY: - # save index. 
- self.index_docstore.save_local(self.index_path) - - def _build_pinecone_index(self, embedding: Embeddings = None): - if embedding is None: - embedding = OpenAIEmbeddings() - if self.index_docstore is None: - pinecone.init( - api_key=os.environ['PINECONE_API_KEY'], # find at app.pinecone.io - environment=os.environ['PINECONE_ENVIRONMENT'] # next to api key in console - ) - texts = reduce( - lambda x, y: x + y, [doc["texts"] for doc in self.docs.values()], [] - ) - metadatas = reduce( - lambda x, y: x + y, [doc["metadata"] for doc in self.docs.values()], [] - ) - - # TODO: que cuando exista que no lo borre, sino que lo actualice - # index_name = "langchain-demo1" - # if index_name in pinecone.list_indexes(): - # self.index_docstore = pinecone.Index(index_name) - # vectors = [] - # for text, metadata in zip(texts, metadatas): - # # embed = - # self.index_docstore.upsert(vectors=vectors) - # else: - # if openai.api_type == 'azure': - # self.index_docstore = Pinecone.from_texts(texts, embedding, metadatas=metadatas, index_name=index_name) - # else: - # self.index_docstore = OriginalPinecone.from_texts(texts, embedding, metadatas=metadatas, index_name=index_name) - - index_name = "langchain-demo1" - - # if the index exists, delete it - if index_name in pinecone.list_indexes(): - pinecone.delete_index(index_name) - - # create new index - if openai.api_type == 'azure': - self.index_docstore = Pinecone.from_texts(texts, embedding, metadatas=metadatas, index_name=index_name) - else: - self.index_docstore = OriginalPinecone.from_texts(texts, embedding, metadatas=metadatas, index_name=index_name) - - def get_evidence( - self, - answer: Answer, - embedding: Embeddings, - k: int = 3, - max_sources: int = 5, - marginal_relevance: bool = True, - ) -> str: - if self.index_docstore is None: - self._build_faiss_index(embedding) - - init_search_time = time.time() - - # want to work through indices but less k - if marginal_relevance: - docs = self.index_docstore.max_marginal_relevance_search( - answer.question, k=k, fetch_k=5 * k - ) - else: - docs = self.index_docstore.similarity_search( - answer.question, k=k, fetch_k=5 * k - ) - if OPERATING_MODE == "debug": - print(f"time to search docs to build context: {time.time() - init_search_time:.2f} [s]") - init_summary_time = time.time() - partial_summary_time = "" - for i, doc in enumerate(docs): - with get_openai_callback() as cb: - init__partial_summary_time = time.time() - summary_of_chunked_text = self.summary_chain.run( - question=answer.question, context_str=doc.page_content - ) - if OPERATING_MODE == "debug": - partial_summary_time += f"- time to make relevant summary of doc '{i}': {time.time() - init__partial_summary_time:.2f} [s]\n" - engine = self.summary_chain.llm.model_kwargs.get('deployment_id') or self.summary_chain.llm.model_name - if not answer.tokens: - answer.tokens = [{ - 'engine': engine, - 'total_tokens': cb.total_tokens}] - else: - answer.tokens.append({ - 'engine': engine, - 'total_tokens': cb.total_tokens - }) - summarized_package = ( - doc.metadata["key"], - doc.metadata["citation"], - summary_of_chunked_text, - doc.page_content, - ) - if "Not applicable" not in summary_of_chunked_text and summarized_package not in answer.packages: - answer.packages.append(summarized_package) - yield answer - if len(answer.packages) == max_sources: - break - if OPERATING_MODE == "debug": - print(f"time to make all relevant summaries: {time.time() - init_summary_time:.2f} [s]") - # no se printea el ultimo caracter porque es un \n - 
print(partial_summary_time[:-1]) - context_str = "\n\n".join( - [f"{citation}: {summary_of_chunked_text}" - for key, citation, summary_of_chunked_text, chunked_text in answer.packages - if "Not applicable" not in summary_of_chunked_text] - ) - chunks_str = "\n\n".join( - [f"{citation}: {chunked_text}" - for key, citation, summary_of_chunked_text, chunked_text in answer.packages - if "Not applicable" not in summary_of_chunked_text] - ) - valid_keys = [key - for key, citation, summary_of_chunked_text, chunked_textin in answer.packages - if "Not applicable" not in summary_of_chunked_text] - if len(valid_keys) > 0: - context_str += "\n\nValid keys: " + ", ".join(valid_keys) - chunks_str += "\n\nValid keys: " + ", ".join(valid_keys) - answer.context = context_str - answer.chunks = chunks_str - yield answer - - def query( - self, - query: str, - embedding: Embeddings, - chat_history: list[tuple[str, str]], - k: int = 10, - max_sources: int = 5, - length_prompt: str = "about 100 words", - marginal_relevance: bool = True, - ): - for answer in self._query( - query, - embedding, - chat_history, - k=k, - max_sources=max_sources, - length_prompt=length_prompt, - marginal_relevance=marginal_relevance, - ): - pass - return answer - - def _query( - self, - query: str, - embedding: Embeddings, - chat_history: list[tuple[str, str]], - k: int, - max_sources: int, - length_prompt: str, - marginal_relevance: bool, - ): - if k < max_sources: - k = max_sources + 1 - - answer = Answer(question=query) - - messages_qa = [system_message_prompt] - if len(chat_history) != 0: - for conversation in chat_history: - messages_qa.append(HumanMessagePromptTemplate.from_template(conversation[0])) - messages_qa.append(AIMessagePromptTemplate.from_template(conversation[1])) - messages_qa.append(human_qa_message_prompt) - chat_qa_prompt = ChatPromptTemplate.from_messages(messages_qa) - self.qa_chain = LLMChain(prompt=chat_qa_prompt, llm=self.llm) - - for answer in self.get_evidence( - answer, - embedding, - k=k, - max_sources=max_sources, - marginal_relevance=marginal_relevance, - ): - yield answer - - references_dict = dict() - passages = dict() - if len(answer.context) < 10: - answer_text = "I cannot answer this question due to insufficient information." - else: - with get_openai_callback() as cb: - init_qa_time = time.time() - answer_text = self.qa_chain.run( - question=answer.question, context_str=answer.context, length=length_prompt - ) - if OPERATING_MODE == "debug": - print(f"time to make the Q&A answer: {time.time() - init_qa_time:.2f} [s]") - engine = self.qa_chain.llm.model_kwargs.get('deployment_id') or self.qa_chain.llm.model_name - if not answer.tokens: - answer.tokens = [{ - 'engine': engine, - 'total_tokens': cb.total_tokens}] - else: - answer.tokens.append({ - 'engine': engine, - 'total_tokens': cb.total_tokens - }) - - # it still happens lol - if "(Foo2012)" in answer_text: - answer_text = answer_text.replace("(Foo2012)", "") - for key, citation, summary, text in answer.packages: - # do check for whole key (so we don't catch Callahan2019a with Callahan2019) - skey = key.split(" ")[0] - if skey + " " in answer_text or skey + ")" in answer_text: - references_dict[skey] = citation - passages[key] = text - references_str = "\n\n".join( - [f"{i+1}. 
({k}): {c}" for i, (k, c) in enumerate(references_dict.items())] - ) - - # cost_str = f"{answer_text}\n\n" - cost_str = "" - itemized_cost = "" - total_amount = 0 - for d in answer.tokens: - total_tokens = d.get('total_tokens') - if total_tokens: - engine = d.get('engine') - key_price = None - for key in PRICES.keys(): - if re.match(f"{key}", engine): - key_price = key - break - if PRICES.get(key_price): - partial_amount = total_tokens / 1000 * PRICES.get(key_price) - total_amount += partial_amount - itemized_cost += f"- {engine}: {total_tokens} tokens\t ---> ${partial_amount:.4f},\n" - else: - itemized_cost += f"- {engine}: {total_tokens} tokens,\n" - # delete ,\n - itemized_cost = itemized_cost[:-2] - - # add tokens to formatted answer - cost_str += f"Total cost: ${total_amount:.4f}\nItemized cost:\n{itemized_cost}" - - answer.answer = answer_text - answer.cost_str = cost_str - answer.references = references_str - answer.passages = passages - yield answer - - diff --git a/spaces/huak95/personaGPT_custom/frontend/README.md b/spaces/huak95/personaGPT_custom/frontend/README.md deleted file mode 100644 index 965a1228cf6c9add1218e0adef73bb6ee230fe7f..0000000000000000000000000000000000000000 --- a/spaces/huak95/personaGPT_custom/frontend/README.md +++ /dev/null @@ -1,38 +0,0 @@ -This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). - -## Getting Started - -First, run the development server: - -```bash -npm run dev -# or -yarn dev -# or -pnpm dev -``` - -Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. - -You can start editing the page by modifying `pages/index.tsx`. The page auto-updates as you edit the file. - -[API routes](https://nextjs.org/docs/api-routes/introduction) can be accessed on [http://localhost:3000/api/hello](http://localhost:3000/api/hello). This endpoint can be edited in `pages/api/hello.ts`. - -The `pages/api` directory is mapped to `/api/*`. Files in this directory are treated as [API routes](https://nextjs.org/docs/api-routes/introduction) instead of React pages. - -This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. - -## Learn More - -To learn more about Next.js, take a look at the following resources: - -- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. -- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. - -You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! - -## Deploy on Vercel - -The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. - -Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details. 
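For orientation, here is a minimal usage sketch of the `Dataset` class from the deleted streamlit_langchain_chat chat module above. It is an illustration only: the import path, the file name `example_paper.pdf`, and the citation/key values are assumptions, and it presumes a valid OpenAI API key plus the package's own `constants` and `prompts` modules are available.

```python
# Minimal usage sketch (all names below are illustrative assumptions).
from langchain.embeddings.openai import OpenAIEmbeddings

from streamlit_langchain_chat.chat import Dataset  # assumed module path

docs = Dataset(chunk_size_limit=3000, name="demo")

# read_source() picks a parser by file extension (.pdf here) and chunks the text;
# passing citation and key explicitly skips the LLM-based citation guess in add().
docs.add("example_paper.pdf", citation="Doe et al., 2023", key="Doe2023")

# query() drives _query(): get_evidence() retrieves and summarizes chunks with the
# summary chain, then the QA chain writes the final answer from that context.
answer = docs.query(
    "What problem does the paper address?",
    embedding=OpenAIEmbeddings(),
    chat_history=[],
    k=10,
    max_sources=5,
)
print(answer.answer)
print(answer.references)
print(answer.cost_str)
```

Note that `get_evidence()` builds the FAISS index the first time it runs and reuses it on later queries.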
diff --git a/spaces/huang4414/anime-remove-background/README.md b/spaces/huang4414/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/huang4414/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/huggan/sefa/models/pggan_discriminator.py b/spaces/huggan/sefa/models/pggan_discriminator.py deleted file mode 100644 index c1bc97fa513eb3075d0b6bd1df775236cacf396a..0000000000000000000000000000000000000000 --- a/spaces/huggan/sefa/models/pggan_discriminator.py +++ /dev/null @@ -1,402 +0,0 @@ -# python3.7 -"""Contains the implementation of discriminator described in PGGAN. - -Paper: https://arxiv.org/pdf/1710.10196.pdf - -Official TensorFlow implementation: -https://github.com/tkarras/progressive_growing_of_gans -""" - -import numpy as np - -import torch -import torch.nn as nn -import torch.nn.functional as F - -__all__ = ['PGGANDiscriminator'] - -# Resolutions allowed. -_RESOLUTIONS_ALLOWED = [8, 16, 32, 64, 128, 256, 512, 1024] - -# Initial resolution. -_INIT_RES = 4 - -# Default gain factor for weight scaling. -_WSCALE_GAIN = np.sqrt(2.0) - - -class PGGANDiscriminator(nn.Module): - """Defines the discriminator network in PGGAN. - - NOTE: The discriminator takes images with `RGB` channel order and pixel - range [-1, 1] as inputs. - - Settings for the network: - - (1) resolution: The resolution of the input image. - (2) image_channels: Number of channels of the input image. (default: 3) - (3) label_size: Size of the additional label for conditional generation. - (default: 0) - (4) fused_scale: Whether to fused `conv2d` and `downsample` together, - resulting in `conv2d` with strides. (default: False) - (5) use_wscale: Whether to use weight scaling. (default: True) - (6) minibatch_std_group_size: Group size for the minibatch standard - deviation layer. 0 means disable. (default: 16) - (7) fmaps_base: Factor to control number of feature maps for each layer. - (default: 16 << 10) - (8) fmaps_max: Maximum number of feature maps in each layer. (default: 512) - """ - - def __init__(self, - resolution, - image_channels=3, - label_size=0, - fused_scale=False, - use_wscale=True, - minibatch_std_group_size=16, - fmaps_base=16 << 10, - fmaps_max=512): - """Initializes with basic settings. - - Raises: - ValueError: If the `resolution` is not supported. - """ - super().__init__() - - if resolution not in _RESOLUTIONS_ALLOWED: - raise ValueError(f'Invalid resolution: `{resolution}`!\n' - f'Resolutions allowed: {_RESOLUTIONS_ALLOWED}.') - - self.init_res = _INIT_RES - self.init_res_log2 = int(np.log2(self.init_res)) - self.resolution = resolution - self.final_res_log2 = int(np.log2(self.resolution)) - self.image_channels = image_channels - self.label_size = label_size - self.fused_scale = fused_scale - self.use_wscale = use_wscale - self.minibatch_std_group_size = minibatch_std_group_size - self.fmaps_base = fmaps_base - self.fmaps_max = fmaps_max - - # Level of detail (used for progressive training). 
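# (The `lod` buffer stores the level of detail as a single float: its integer part
# selects which FromRGB input block is active, and its fractional part is the
# blending weight used in `forward()` to fade in a newly added resolution during
# progressive growing.)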
- self.register_buffer('lod', torch.zeros(())) - self.pth_to_tf_var_mapping = {'lod': 'lod'} - - for res_log2 in range(self.final_res_log2, self.init_res_log2 - 1, -1): - res = 2 ** res_log2 - block_idx = self.final_res_log2 - res_log2 - - # Input convolution layer for each resolution. - self.add_module( - f'input{block_idx}', - ConvBlock(in_channels=self.image_channels, - out_channels=self.get_nf(res), - kernel_size=1, - padding=0, - use_wscale=self.use_wscale)) - self.pth_to_tf_var_mapping[f'input{block_idx}.weight'] = ( - f'FromRGB_lod{block_idx}/weight') - self.pth_to_tf_var_mapping[f'input{block_idx}.bias'] = ( - f'FromRGB_lod{block_idx}/bias') - - # Convolution block for each resolution (except the last one). - if res != self.init_res: - self.add_module( - f'layer{2 * block_idx}', - ConvBlock(in_channels=self.get_nf(res), - out_channels=self.get_nf(res), - use_wscale=self.use_wscale)) - tf_layer0_name = 'Conv0' - self.add_module( - f'layer{2 * block_idx + 1}', - ConvBlock(in_channels=self.get_nf(res), - out_channels=self.get_nf(res // 2), - downsample=True, - fused_scale=self.fused_scale, - use_wscale=self.use_wscale)) - tf_layer1_name = 'Conv1_down' if self.fused_scale else 'Conv1' - - # Convolution block for last resolution. - else: - self.add_module( - f'layer{2 * block_idx}', - ConvBlock( - in_channels=self.get_nf(res), - out_channels=self.get_nf(res), - use_wscale=self.use_wscale, - minibatch_std_group_size=self.minibatch_std_group_size)) - tf_layer0_name = 'Conv' - self.add_module( - f'layer{2 * block_idx + 1}', - DenseBlock(in_channels=self.get_nf(res) * res * res, - out_channels=self.get_nf(res // 2), - use_wscale=self.use_wscale)) - tf_layer1_name = 'Dense0' - - self.pth_to_tf_var_mapping[f'layer{2 * block_idx}.weight'] = ( - f'{res}x{res}/{tf_layer0_name}/weight') - self.pth_to_tf_var_mapping[f'layer{2 * block_idx}.bias'] = ( - f'{res}x{res}/{tf_layer0_name}/bias') - self.pth_to_tf_var_mapping[f'layer{2 * block_idx + 1}.weight'] = ( - f'{res}x{res}/{tf_layer1_name}/weight') - self.pth_to_tf_var_mapping[f'layer{2 * block_idx + 1}.bias'] = ( - f'{res}x{res}/{tf_layer1_name}/bias') - - # Final dense block. 
- self.add_module( - f'layer{2 * block_idx + 2}', - DenseBlock(in_channels=self.get_nf(res // 2), - out_channels=1 + self.label_size, - use_wscale=self.use_wscale, - wscale_gain=1.0, - activation_type='linear')) - self.pth_to_tf_var_mapping[f'layer{2 * block_idx + 2}.weight'] = ( - f'{res}x{res}/Dense1/weight') - self.pth_to_tf_var_mapping[f'layer{2 * block_idx + 2}.bias'] = ( - f'{res}x{res}/Dense1/bias') - - self.downsample = DownsamplingLayer() - - def get_nf(self, res): - """Gets number of feature maps according to current resolution.""" - return min(self.fmaps_base // res, self.fmaps_max) - - def forward(self, image, lod=None, **_unused_kwargs): - expected_shape = (self.image_channels, self.resolution, self.resolution) - if image.ndim != 4 or image.shape[1:] != expected_shape: - raise ValueError(f'The input tensor should be with shape ' - f'[batch_size, channel, height, width], where ' - f'`channel` equals to {self.image_channels}, ' - f'`height`, `width` equal to {self.resolution}!\n' - f'But `{image.shape}` is received!') - - lod = self.lod.cpu().tolist() if lod is None else lod - if lod + self.init_res_log2 > self.final_res_log2: - raise ValueError(f'Maximum level-of-detail (lod) is ' - f'{self.final_res_log2 - self.init_res_log2}, ' - f'but `{lod}` is received!') - - lod = self.lod.cpu().tolist() - for res_log2 in range(self.final_res_log2, self.init_res_log2 - 1, -1): - block_idx = current_lod = self.final_res_log2 - res_log2 - if current_lod <= lod < current_lod + 1: - x = self.__getattr__(f'input{block_idx}')(image) - elif current_lod - 1 < lod < current_lod: - alpha = lod - np.floor(lod) - x = (self.__getattr__(f'input{block_idx}')(image) * alpha + - x * (1 - alpha)) - if lod < current_lod + 1: - x = self.__getattr__(f'layer{2 * block_idx}')(x) - x = self.__getattr__(f'layer{2 * block_idx + 1}')(x) - if lod > current_lod: - image = self.downsample(image) - x = self.__getattr__(f'layer{2 * block_idx + 2}')(x) - return x - - -class MiniBatchSTDLayer(nn.Module): - """Implements the minibatch standard deviation layer.""" - - def __init__(self, group_size=16, epsilon=1e-8): - super().__init__() - self.group_size = group_size - self.epsilon = epsilon - - def forward(self, x): - if self.group_size <= 1: - return x - group_size = min(self.group_size, x.shape[0]) # [NCHW] - y = x.view(group_size, -1, x.shape[1], x.shape[2], x.shape[3]) # [GMCHW] - y = y - torch.mean(y, dim=0, keepdim=True) # [GMCHW] - y = torch.mean(y ** 2, dim=0) # [MCHW] - y = torch.sqrt(y + self.epsilon) # [MCHW] - y = torch.mean(y, dim=[1, 2, 3], keepdim=True) # [M111] - y = y.repeat(group_size, 1, x.shape[2], x.shape[3]) # [N1HW] - return torch.cat([x, y], dim=1) - - -class DownsamplingLayer(nn.Module): - """Implements the downsampling layer. - - Basically, this layer can be used to downsample feature maps with average - pooling. - """ - - def __init__(self, scale_factor=2): - super().__init__() - self.scale_factor = scale_factor - - def forward(self, x): - if self.scale_factor <= 1: - return x - return F.avg_pool2d(x, - kernel_size=self.scale_factor, - stride=self.scale_factor, - padding=0) - - -class ConvBlock(nn.Module): - """Implements the convolutional block. - - Basically, this block executes minibatch standard deviation layer (if - needed), convolutional layer, activation layer, and downsampling layer ( - if needed) in sequence. 
- """ - - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - add_bias=True, - downsample=False, - fused_scale=False, - use_wscale=True, - wscale_gain=_WSCALE_GAIN, - activation_type='lrelu', - minibatch_std_group_size=0): - """Initializes with block settings. - - Args: - in_channels: Number of channels of the input tensor. - out_channels: Number of channels of the output tensor. - kernel_size: Size of the convolutional kernels. (default: 3) - stride: Stride parameter for convolution operation. (default: 1) - padding: Padding parameter for convolution operation. (default: 1) - add_bias: Whether to add bias onto the convolutional result. - (default: True) - downsample: Whether to downsample the result after convolution. - (default: False) - fused_scale: Whether to fused `conv2d` and `downsample` together, - resulting in `conv2d` with strides. (default: False) - use_wscale: Whether to use weight scaling. (default: True) - wscale_gain: Gain factor for weight scaling. (default: _WSCALE_GAIN) - activation_type: Type of activation. Support `linear` and `lrelu`. - (default: `lrelu`) - minibatch_std_group_size: Group size for the minibatch standard - deviation layer. 0 means disable. (default: 0) - - Raises: - NotImplementedError: If the `activation_type` is not supported. - """ - super().__init__() - - if minibatch_std_group_size > 1: - in_channels = in_channels + 1 - self.mbstd = MiniBatchSTDLayer(group_size=minibatch_std_group_size) - else: - self.mbstd = nn.Identity() - - if downsample and not fused_scale: - self.downsample = DownsamplingLayer() - else: - self.downsample = nn.Identity() - - if downsample and fused_scale: - self.use_stride = True - self.stride = 2 - self.padding = 1 - else: - self.use_stride = False - self.stride = stride - self.padding = padding - - weight_shape = (out_channels, in_channels, kernel_size, kernel_size) - fan_in = kernel_size * kernel_size * in_channels - wscale = wscale_gain / np.sqrt(fan_in) - if use_wscale: - self.weight = nn.Parameter(torch.randn(*weight_shape)) - self.wscale = wscale - else: - self.weight = nn.Parameter(torch.randn(*weight_shape) * wscale) - self.wscale = 1.0 - - if add_bias: - self.bias = nn.Parameter(torch.zeros(out_channels)) - else: - self.bias = None - - if activation_type == 'linear': - self.activate = nn.Identity() - elif activation_type == 'lrelu': - self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) - else: - raise NotImplementedError(f'Not implemented activation function: ' - f'`{activation_type}`!') - - def forward(self, x): - x = self.mbstd(x) - weight = self.weight * self.wscale - if self.use_stride: - weight = F.pad(weight, (1, 1, 1, 1, 0, 0, 0, 0), 'constant', 0.0) - weight = (weight[:, :, 1:, 1:] + weight[:, :, :-1, 1:] + - weight[:, :, 1:, :-1] + weight[:, :, :-1, :-1]) * 0.25 - x = F.conv2d(x, - weight=weight, - bias=self.bias, - stride=self.stride, - padding=self.padding) - x = self.activate(x) - x = self.downsample(x) - return x - - -class DenseBlock(nn.Module): - """Implements the dense block. - - Basically, this block executes fully-connected layer, and activation layer. - """ - - def __init__(self, - in_channels, - out_channels, - add_bias=True, - use_wscale=True, - wscale_gain=_WSCALE_GAIN, - activation_type='lrelu'): - """Initializes with block settings. - - Args: - in_channels: Number of channels of the input tensor. - out_channels: Number of channels of the output tensor. - add_bias: Whether to add bias onto the fully-connected result. 
- (default: True) - use_wscale: Whether to use weight scaling. (default: True) - wscale_gain: Gain factor for weight scaling. (default: _WSCALE_GAIN) - activation_type: Type of activation. Support `linear` and `lrelu`. - (default: `lrelu`) - - Raises: - NotImplementedError: If the `activation_type` is not supported. - """ - super().__init__() - weight_shape = (out_channels, in_channels) - wscale = wscale_gain / np.sqrt(in_channels) - if use_wscale: - self.weight = nn.Parameter(torch.randn(*weight_shape)) - self.wscale = wscale - else: - self.weight = nn.Parameter(torch.randn(*weight_shape) * wscale) - self.wscale = 1.0 - - if add_bias: - self.bias = nn.Parameter(torch.zeros(out_channels)) - else: - self.bias = None - - if activation_type == 'linear': - self.activate = nn.Identity() - elif activation_type == 'lrelu': - self.activate = nn.LeakyReLU(negative_slope=0.2, inplace=True) - else: - raise NotImplementedError(f'Not implemented activation function: ' - f'`{activation_type}`!') - - def forward(self, x): - if x.ndim != 2: - x = x.view(x.shape[0], -1) - x = F.linear(x, weight=self.weight * self.wscale, bias=self.bias) - x = self.activate(x) - return x diff --git a/spaces/hungchiayu/CaptionFLAN-T5/VT5.py b/spaces/hungchiayu/CaptionFLAN-T5/VT5.py deleted file mode 100644 index bb925a2fc22f06fa84e9a24393d026cc189e6ff0..0000000000000000000000000000000000000000 --- a/spaces/hungchiayu/CaptionFLAN-T5/VT5.py +++ /dev/null @@ -1,68 +0,0 @@ - -from typing import Tuple -import torch -from torch import nn -from transformers import ( - AutoModelForSeq2SeqLM, - AutoTokenizer, - Trainer, - TrainingArguments, -) - - -class MLP(nn.Module): - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.model(x) - - def __init__(self, sizes: Tuple[int, ...], bias=True, act=nn.Tanh): - super(MLP, self).__init__() - layers = [] - for i in range(len(sizes) - 1): - layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=bias)) - if i < len(sizes) - 2: - layers.append(act()) - self.model = nn.Sequential(*layers) - -class VT5(nn.Module): - - - def __init__(self,t5,tokenizer,vision_model,image_emb_size=512,prefix_length=10): - super().__init__() - self.t5 = t5 - self.tokenizer = tokenizer - self.t5_embedding_size = t5.get_input_embeddings().embedding_dim - self.image_emb_size = image_emb_size - self.prefix_length = prefix_length - self.vision_model = vision_model - ## This is the mapping networks that projects the image embedding space to the language model vector space - self.prefix_projection = MLP((self.image_emb_size, (self.t5_embedding_size * prefix_length) // 2, - self.t5_embedding_size * prefix_length)) - - def forward(self,pixel_values,output_ids): - - image_embeds = self.vision_model(pixel_values).image_embeds - - mapped_embedding = self.prefix_projection(image_embeds).view(-1,self.prefix_length,self.t5_embedding_size) - - ##concat_embedding = torch.cat([text_embedding,mapped_embedding],axis=1) - - output_ids[output_ids == self.tokenizer.pad_token_id] = -100 ## Do not compute loss w.r.t pad tokens - - outputs = self.t5(inputs_embeds=mapped_embedding,labels=output_ids) - - return outputs - - def generate_caption(self,pixel_values): - - image_embeds = self.vision_model(pixel_values).image_embeds - mapped_embedding = self.prefix_projection(image_embeds).view(-1,self.prefix_length,self.t5_embedding_size) - - output_tokens = self.t5.generate(inputs_embeds=mapped_embedding) - caption = self.tokenizer.decode(output_tokens[0],skip_special_tokens=True) - - return caption - - - - diff --git 
a/spaces/hysts/ControlNet-with-Anything-v4/app_pose.py b/spaces/hysts/ControlNet-with-Anything-v4/app_pose.py deleted file mode 100644 index ef409d4a89c17a6633be1ece9194afe0bb0a8d56..0000000000000000000000000000000000000000 --- a/spaces/hysts/ControlNet-with-Anything-v4/app_pose.py +++ /dev/null @@ -1,89 +0,0 @@ -# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_pose2image.py -# The original license file is LICENSE.ControlNet in this repo. -import gradio as gr - - -def create_demo(process, max_images=12, default_num_images=3): - with gr.Blocks() as demo: - with gr.Row(): - gr.Markdown('## Control Stable Diffusion with Human Pose') - with gr.Row(): - with gr.Column(): - input_image = gr.Image(source='upload', type='numpy') - prompt = gr.Textbox(label='Prompt') - run_button = gr.Button(label='Run') - with gr.Accordion('Advanced options', open=False): - is_pose_image = gr.Checkbox(label='Is pose image', - value=False) - gr.Markdown( - 'You can use [PoseMaker2](https://huggingface.co/spaces/jonigata/PoseMaker2) to create pose images.' - ) - num_samples = gr.Slider(label='Images', - minimum=1, - maximum=max_images, - value=default_num_images, - step=1) - image_resolution = gr.Slider(label='Image Resolution', - minimum=256, - maximum=512, - value=512, - step=256) - detect_resolution = gr.Slider(label='OpenPose Resolution', - minimum=128, - maximum=512, - value=512, - step=1) - num_steps = gr.Slider(label='Steps', - minimum=1, - maximum=100, - value=20, - step=1) - guidance_scale = gr.Slider(label='Guidance Scale', - minimum=0.1, - maximum=30.0, - value=9.0, - step=0.1) - seed = gr.Slider(label='Seed', - minimum=-1, - maximum=2147483647, - step=1, - randomize=True) - a_prompt = gr.Textbox( - label='Added Prompt', - value='best quality, extremely detailed') - n_prompt = gr.Textbox( - label='Negative Prompt', - value= - 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality' - ) - with gr.Column(): - result = gr.Gallery(label='Output', - show_label=False, - elem_id='gallery').style(grid=2, - height='auto') - inputs = [ - input_image, - prompt, - a_prompt, - n_prompt, - num_samples, - image_resolution, - detect_resolution, - num_steps, - guidance_scale, - seed, - is_pose_image, - ] - prompt.submit(fn=process, inputs=inputs, outputs=result) - run_button.click(fn=process, - inputs=inputs, - outputs=result, - api_name='pose') - return demo - - -if __name__ == '__main__': - from model import Model - model = Model() - demo = create_demo(model.process_pose) - demo.queue().launch() diff --git a/spaces/hzwluoye/gpt4/server/babel.py b/spaces/hzwluoye/gpt4/server/babel.py deleted file mode 100644 index 94407e4b4d3e82e7722cac409a7e311bb46c43be..0000000000000000000000000000000000000000 --- a/spaces/hzwluoye/gpt4/server/babel.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -import subprocess -from flask import request, session, jsonify -from flask_babel import Babel - - -def get_languages_from_dir(directory): - """Return a list of directory names in the given directory.""" - return [name for name in os.listdir(directory) - if os.path.isdir(os.path.join(directory, name))] - - -BABEL_DEFAULT_LOCALE = 'en_US' -BABEL_LANGUAGES = get_languages_from_dir('translations') - - -def create_babel(app): - """Create and initialize a Babel instance with the given Flask app.""" - babel = Babel(app) - app.config['BABEL_DEFAULT_LOCALE'] = BABEL_DEFAULT_LOCALE - app.config['BABEL_LANGUAGES'] = 
BABEL_LANGUAGES - - babel.init_app(app, locale_selector=get_locale) - compile_translations() - - -def get_locale(): - """Get the user's locale from the session or the request's accepted languages.""" - return session.get('language') or request.accept_languages.best_match(BABEL_LANGUAGES) - - -def get_languages(): - """Return a list of available languages in JSON format.""" - return jsonify(BABEL_LANGUAGES) - - -def compile_translations(): - """Compile the translation files.""" - result = subprocess.run( - ['pybabel', 'compile', '-d', 'translations'], - stdout=subprocess.PIPE, - ) - - if result.returncode != 0: - raise Exception( - f'Compiling translations failed:\n{result.stdout.decode()}') - - print('Translations compiled successfully') diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/utils/plot.py b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/utils/plot.py deleted file mode 100644 index ccc588e5c01ca550b69c385aeb3fd139c59fb88a..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/utils/plot.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding: utf-8 - -import os -from pathlib import Path - -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap -from prettytable import PrettyTable -from sklearn.metrics import roc_curve, auc - -image_path = "/data/anxiang/IJB_release/IJBC" -files = [ - "./ms1mv3_arcface_r100/ms1mv3_arcface_r100/ijbc.npy" -] - - -def read_template_pair_list(path): - pairs = pd.read_csv(path, sep=' ', header=None).values - t1 = pairs[:, 0].astype(np.int) - t2 = pairs[:, 1].astype(np.int) - label = pairs[:, 2].astype(np.int) - return t1, t2, label - - -p1, p2, label = read_template_pair_list( - os.path.join('%s/meta' % image_path, - '%s_template_pair_label.txt' % 'ijbc')) - -methods = [] -scores = [] -for file in files: - methods.append(file.split('/')[-2]) - scores.append(np.load(file)) - -methods = np.array(methods) -scores = dict(zip(methods, scores)) -colours = dict( - zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2'))) -x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1] -tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels]) -fig = plt.figure() -for method in methods: - fpr, tpr, _ = roc_curve(label, scores[method]) - roc_auc = auc(fpr, tpr) - fpr = np.flipud(fpr) - tpr = np.flipud(tpr) # select largest tpr at same fpr - plt.plot(fpr, - tpr, - color=colours[method], - lw=1, - label=('[%s (AUC = %0.4f %%)]' % - (method.split('-')[-1], roc_auc * 100))) - tpr_fpr_row = [] - tpr_fpr_row.append("%s-%s" % (method, "IJBC")) - for fpr_iter in np.arange(len(x_labels)): - _, min_index = min( - list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr))))) - tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100)) - tpr_fpr_table.add_row(tpr_fpr_row) -plt.xlim([10 ** -6, 0.1]) -plt.ylim([0.3, 1.0]) -plt.grid(linestyle='--', linewidth=1) -plt.xticks(x_labels) -plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True)) -plt.xscale('log') -plt.xlabel('False Positive Rate') -plt.ylabel('True Positive Rate') -plt.title('ROC on IJB') -plt.legend(loc="lower right") -print(tpr_fpr_table) diff --git a/spaces/icayir/flofi_mini/app.py b/spaces/icayir/flofi_mini/app.py deleted file mode 100644 index 9cd10f866f8aee79d148eb18cacb8ca4bcd88a41..0000000000000000000000000000000000000000 --- a/spaces/icayir/flofi_mini/app.py +++ /dev/null @@ -1,173 +0,0 @@ -### 1. 
Imports and class names setup ### -import gradio as gr -import os -import torch - -from model import create_effnetb2_model -from timeit import default_timer as timer -from typing import Tuple, Dict - -# Setup class names -class_names= ['alpine sea holly', - 'anthurium', - 'artichoke', - 'azalea', - 'ball moss', - 'balloon flower', - 'barbeton daisy', - 'bearded iris', - 'bee balm', - 'bird of paradise', - 'bishop of llandaff', - 'black-eyed susan', - 'blackberry lily', - 'blanket flower', - 'bolero deep blue', - 'bougainvillea', - 'bromelia', - 'buttercup', - 'californian poppy', - 'camellia', - 'canna lily', - 'canterbury bells', - 'cape flower', - 'carnation', - 'cautleya spicata', - 'clematis', - "colt's foot", - 'columbine', - 'common dandelion', - 'corn poppy', - 'cyclamen', - 'daffodil', - 'desert-rose', - 'english marigold', - 'fire lily', - 'foxglove', - 'frangipani', - 'fritillary', - 'garden phlox', - 'gaura', - 'gazania', - 'geranium', - 'giant white arum lily', - 'globe thistle', - 'globe-flower', - 'grape hyacinth', - 'great masterwort', - 'hard-leaved pocket orchid', - 'hibiscus', - 'hippeastrum', - 'japanese anemone', - 'king protea', - 'lenten rose', - 'lotus lotus', - 'love in the mist', - 'magnolia', - 'mallow', - 'marigold', - 'mexican aster', - 'mexican petunia', - 'monkshood', - 'moon orchid', - 'morning glory', - 'orange dahlia', - 'osteospermum', - 'oxeye daisy', - 'passion flower', - 'pelargonium', - 'peruvian lily', - 'petunia', - 'pincushion flower', - 'pink primrose', - 'pink-yellow dahlia', - 'poinsettia', - 'primula', - 'prince of wales feathers', - 'purple coneflower', - 'red ginger', - 'rose', - 'ruby-lipped cattleya', - 'siam tulip', - 'silverbush', - 'snapdragon', - 'spear thistle', - 'spring crocus', - 'stemless gentian', - 'sunflower', - 'sweet pea', - 'sweet william', - 'sword lily', - 'thorn apple', - 'tiger lily', - 'toad lily', - 'tree mallow', - 'tree poppy', - 'trumpet creeper', - 'wallflower', - 'water lily', - 'watercress', - 'wild pansy', - 'windflower', - 'yellow iris' - ] - -### 2. Model and transforms preparation ### - -# Create EffNetB2 model -effnetb2, effnetb2_transforms = create_effnetb2_model( - num_classes=102, # len(class_names) would also work -) - -# Load saved weights -effnetb2.load_state_dict( - torch.load( - f="pretrained_effnetb2_feature_extractor_fl102.pth", - map_location=torch.device("cpu"), # load to CPU - ) -) - -### 3. Predict function ### - -# Create predict function -def predict(img) -> Tuple[Dict, float]: - """Transforms and performs a prediction on img and returns prediction and time taken. - """ - # Start the timer - start_time = timer() - - # Transform the target image and add a batch dimension - img = effnetb2_transforms(img).unsqueeze(0) - - # Put model into evaluation mode and turn on inference mode - effnetb2.eval() - with torch.inference_mode(): - # Pass the transformed image through the model and turn the prediction logits into prediction probabilities - pred_probs = torch.softmax(effnetb2(img), dim=1) - - # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter) - pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))} - - # Calculate the prediction time - pred_time = round(timer() - start_time, 5) - - # Return the prediction dictionary and prediction time - return pred_labels_and_probs, pred_time - -### 4. 
Gradio app ### - -title = "Flofi Demo" -description = "An EfficientNetB2 feature extractor computer vision model to classify images of 102 flower species." -article = "Created by Haydar Uçar." - -# Create the Gradio demo -demo = gr.Interface(fn=predict, # mapping function from input to output - inputs=gr.Image(type="pil"), # what are the inputs? - outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs? - gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs - title=title, - description=description, - article=article) - -# Launch the demo! -demo.launch() # generate a publically shareable URL? diff --git a/spaces/imseldrith/Imagine/tapp.py b/spaces/imseldrith/Imagine/tapp.py deleted file mode 100644 index eecfa67bcf9739f4e535ce45adb6a7560d16d7ac..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/Imagine/tapp.py +++ /dev/null @@ -1,106 +0,0 @@ -from flask import Flask, render_template, request, send_file, jsonify -from imaginepy import AsyncImagine #, Style, Ratio, Model -from imaginepy.constants import * -import os - -app = Flask(__name__) - -@app.route('/') -def index(): - return render_template('index.html') - -@app.route('/generate', methods=['POST']) -async def generate_image(): - prompt = request.form['prompt'] - style = request.form['style'] - ratio = request.form['ratio'] - model = request.form['model'] - - imagine = AsyncImagine() - - try: - img_data = await imagine.sdprem( - prompt=prompt, - style=Style[style], - ratio=Ratio[ratio], - seed="", - cfg=16, - model=Model[model], - asbase64=False - ) - except Exception as e: - return f"An error occurred while generating the image: {e}" - - if img_data is None: - return "An error occurred while generating the image." - - img_data = await imagine.upscale(img_data) - - if img_data is None: - return "An error occurred while upscaling the image." 
- - try: - with open("static/example.jpeg", mode="wb") as img_file: - img_file.write(img_data) - except Exception as e: - return f"An error occurred while writing the image to file: {e}" - finally: - await imagine.close() - return render_template('output.html') - - -@app.route('/api/generate', methods=['POST']) -def api_generate_image(): - data = request.get_json() - prompt = data['prompt'] - style = data['style'] - ratio = data['ratio'] - model = data['model'] - - imagine = Imagine() - - try: - img_data = imagine.sdprem( - prompt=prompt, - style=Style[style], - ratio=Ratio[ratio], - seed='', - cfg=16, - model=Model[model], - asbase64=False - ) - except Exception as e: - return jsonify({'error': f"An error occurred while generating the image: {e}"}), 500 - - if img_data is None: - return jsonify({'error': "An error occurred while generating the image."}), 500 - - img_data = imagine.upscale(img_data) - - if img_data is None: - return jsonify({'error': "An error occurred while upscaling the image."}), 500 - - try: - image_path = os.path.join(app.root_path, "generated.jpeg") - with open(image_path, mode="wb") as img_file: - img_file.write(img_data) - except Exception as e: - return jsonify({'error': f"An error occurred while writing the image to file: {e}"}), 500 - - finally: - imagine.close() - - return send_file(image_path, mimetype='image/jpeg', as_attachment=True) - -@app.errorhandler(404) -def page_not_found(e): - # Render the 404.html template - return render_template('404.html'), 404 - -@app.route('/api-docs') -def api_docs(): - return render_template('api_docs.html') - -if __name__ == "__main__": - app.run(host="0.0.0.0", port=7860,debug=True) - \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Call Of Duty Black Ops 3 Crack Pc Download The Ultimate Guide for Gamers.md b/spaces/inamXcontru/PoeticTTS/Call Of Duty Black Ops 3 Crack Pc Download The Ultimate Guide for Gamers.md deleted file mode 100644 index 188ba433710ea538db19d96f9dbbb37d276a1f93..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Call Of Duty Black Ops 3 Crack Pc Download The Ultimate Guide for Gamers.md +++ /dev/null @@ -1,5 +0,0 @@ -
    -

Call Of Duty Black Ops 3 Lag Fix Download Setup in a single direct link for Windows PC. After applying this fix, you can easily resolve lag or freeze issues on game startup in any version of Call of Duty Black Ops 3.

    -

    Call Of Duty Black Ops 3 Crack Pc Download


    Download Zip ✓✓✓ https://gohhs.com/2uz3vK



    aaccfb2cb3
    -
    -
    \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Can You Download Friends Pictures From Facebook Hochzeitstag Markt O The Best Way to Backup Your Images.md b/spaces/inamXcontru/PoeticTTS/Can You Download Friends Pictures From Facebook Hochzeitstag Markt O The Best Way to Backup Your Images.md deleted file mode 100644 index a324fd498807106a56015dd040aab2b0cd06255c..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Can You Download Friends Pictures From Facebook Hochzeitstag Markt O The Best Way to Backup Your Images.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Can You Download Friends Pictures From Facebook hochzeitstag markt o


    Download ✦✦✦ https://gohhs.com/2uz3mD



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/inamXcontru/PoeticTTS/Chessbase - mega pack [rbc] Learn from the best with more than 85000 annotated games by grandmasters.md b/spaces/inamXcontru/PoeticTTS/Chessbase - mega pack [rbc] Learn from the best with more than 85000 annotated games by grandmasters.md deleted file mode 100644 index f219eb7f3107bc5458e8ca1ff9f316e873d639c8..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Chessbase - mega pack [rbc] Learn from the best with more than 85000 annotated games by grandmasters.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Chessbase - mega pack [rbc]


Download Zip: https://gohhs.com/2uz4it



    - - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/La Brujula Para El Ministro Evangelico Descargar 96.md b/spaces/inplisQlawa/anything-midjourney-v4-1/La Brujula Para El Ministro Evangelico Descargar 96.md deleted file mode 100644 index 6cfb00fcd2ea7b90bae2147dfaba1d095d496799..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/La Brujula Para El Ministro Evangelico Descargar 96.md +++ /dev/null @@ -1,83 +0,0 @@ -
    -

La Brujula Para El Ministro Evangelico Descargar 96: An indispensable resource for Christian leadership in Latin America

    - -

What is the compass for the evangelical minister (La Brújula para el Ministro Evangélico)? It is a book that brings together the advice and experience of 23 outstanding evangelical leaders from Latin America, who have collaborated in writing this work in order to guide and train Christian ministers and workers in their labor. It is the timely work we were all waiting for.

    - -

Why download the compass for the evangelical minister? Because it is a book that addresses the most relevant and current topics for evangelical ministry in Latin America, such as:

    -

    La Brujula Para El Ministro Evangelico Descargar 96


    Download File ❤❤❤ https://urlin.us/2uEytU



    - -
      -
• The minister's character, mind, physical health, and home.
    • -
• The minister's personal finances, person-to-person ministry, use of time, and ceremonies.
    • -
• The minister's training of workers, discipline, teamwork, and administration.
    • -
• Encouraging the church, spiritual counseling, meetings, and music in the church.
    • -
• The minister's social work, evangelism, and involvement in the community.
    • -
    - -

How do you download the compass for the evangelical minister? It is very easy. Just click on the link below and you will be able to access the book in PDF format. You can read it on your computer, tablet, or phone, or print it if you prefer. Do not miss this opportunity to obtain this valuable resource for your ministry.

    - -Descargar La Brujula Para El Ministro Evangelico - -

La Brujula Para El Ministro Evangelico Descargar 96: A book that will help you grow and serve better

    - -

The compass for the evangelical minister is not just another book. It is a book that will help you grow and serve better as an evangelical minister in Latin America. It is a book that will give you the guidance you need to carry out more successfully the mission of communicating the sacred gospel. It is a book that will inspire you and challenge you to be a better Christian leader.

    - -

The compass for the evangelical minister is a book written by people who know firsthand the challenges and opportunities of evangelical ministry in Latin America. They are people who have devoted their lives to serving God and his people. They are people with solid biblical training and extensive experience in their respective fields of labor. They are people who have wanted to share their knowledge and their testimonies with other ministers and Christian workers.

    - -

The compass for the evangelical minister is a book you cannot afford to skip if you want to be an effective and relevant evangelical minister in Latin America. It is a book that will equip you with the tools you need to face the challenges and seize the opportunities of evangelical ministry in Latin America. It is a book that will accompany you on your path of personal and ministerial growth.

    - -

Do not wait any longer. Download the compass for the evangelical minister right now and start enjoying this excellent resource for your ministry.

    -

    - -Descargar La Brujula Para El Ministro Evangelico

    -

La Brujula Para El Ministro Evangelico Descargar 96: A book that will teach you the principles and practices of evangelical ministry

    - -

The compass for the evangelical minister is not just a book that will inform you about evangelical ministry in Latin America. It is a book that will teach you the principles and practices of evangelical ministry, grounded in the Word of God and in the authors' experience. It is a book that will show you how to apply those principles and practices in your own context and ministry.

    - -

The compass for the evangelical minister is a book that will help you develop your character, your mind, your physical health, and your home as an evangelical minister. It will help you manage your personal finances, relate to people, make the most of your time, and officiate ceremonies as an evangelical minister. It will help you train workers, exercise discipline, work as a team, and administer the ministry as an evangelical minister. It will help you encourage the church, give spiritual counsel, lead meetings, and use music in the church as an evangelical minister. It will help you carry out social work, evangelism, and community involvement as an evangelical minister.

    - -

La Brújula para el Ministro Evangélico is a book that will offer you practical advice, concrete examples, personal testimonies and useful resources for every aspect of evangelical ministry. It is a book that will motivate you to keep growing and learning as an evangelical minister. It is a book that will challenge you to be faithful and fruitful as an evangelical minister.

    - -

Do not let this opportunity pass you by: download La Brújula para el Ministro Evangélico and benefit from this magnificent resource for your ministry.

- -Download La Brujula Para El Ministro Evangelico -

La Brujula Para El Ministro Evangelico Descargar 96: A book that will connect you with other evangelical ministers and workers across Latin America

    - -

La Brújula para el Ministro Evangélico is not just a book that will give you direction for evangelical ministry in Latin America. It is a book that will connect you with other evangelical ministers and workers across Latin America, who wrote it in order to share their experiences and lessons with colleagues in the ministry. It is a book that will make you feel part of a great family of servants of God.

    - -

La Brújula para el Ministro Evangélico is a book that will let you get to know the realities and challenges of evangelical ministry in Latin America from the perspective of 23 authors representing different countries, denominations, ministries and generations. It is a book that will show you the diversity and richness of evangelical ministry in Latin America, as well as its common ground and shared needs.

    - -

La Brújula para el Ministro Evangélico is a book that will invite you to dialogue and collaborate with other evangelical ministers and workers in Latin America in order to strengthen the testimony and impact of the gospel in our region. It is a book that will encourage you to support and pray for other evangelical ministers and workers in Latin America, so that God may bless them and use them in his work.

    - -

Do not hesitate any longer. Download La Brújula para el Ministro Evangélico now and start enjoying this wonderful resource for your ministry.

    - -Download La Brujula Para El Ministro Evangelico -
    La Brujula Para El Ministro Evangelico Descargar 96: A book that will point you to the best resources for evangelical ministry
    - -

La Brújula para el Ministro Evangélico is not just a book that will instruct you about evangelical ministry in Latin America. It is a book that will point you to the best resources for evangelical ministry, resources that will help you broaden your knowledge, improve your skills and enrich your ministry. It is a book that will make it easier for you to reach the most useful and up-to-date resources for evangelical ministry.

    - -

La Brújula para el Ministro Evangélico is a book that will give you a select, annotated bibliography of the most important and relevant books for evangelical ministry in Latin America, organized by topic and area of interest. It is a book that will list the addresses of the most interesting and trustworthy websites for evangelical ministry in Latin America, where you will find information, materials, courses and contacts. It is a book that will suggest the most informative and edifying magazines, newsletters, newspapers and radio and television programs for evangelical ministry in Latin America, so you can stay up to date on the news, trends and experiences of evangelical ministry.

    - -

La Brújula para el Ministro Evangélico is a book that will save you time and money in your search for ministry resources. It is a book that vouches for the quality and reliability of the resources it recommends. It is a book that will put you in touch with the resources for evangelical ministry that are most suitable and relevant to your context and your ministry.

    - -

Do not think twice. Download La Brújula para el Ministro Evangélico now and start enjoying this incredible resource for your ministry.

- -Download La Brujula Para El Ministro Evangelico -La Brujula Para El Ministro Evangelico Descargar 96: A book that will bless you and make you a blessing for evangelical ministry - -

La Brújula para el Ministro Evangélico is not just a book that will guide you in evangelical ministry in Latin America. It is a book that will bless you and make you a blessing for evangelical ministry, filling you with grace and power to fulfill your calling and your purpose. It is a book that will draw you closer to God and to his will for your life and your ministry.

    - -

La Brújula para el Ministro Evangélico is a book that will nourish you with the Word of God and with the testimony of other servants of God, that will strengthen you in your faith and your commitment, and that will renew your vision and your passion. It is a book that will challenge you to grow in your character and your mind, in your physical health and your home, in your personal finances and your person-to-person ministry, in your use of time and your ceremonies, in your training of workers and your discipline, in your teamwork and your administration, in your encouragement of the church and your spiritual counseling, in your meetings and your music in the church, in your social work and your evangelism, in your involvement in the community and in giving glory to God.

    - -

La Brújula para el Ministro Evangélico is a book that will equip you with the best resources for evangelical ministry, giving you access to the most important and relevant books, websites, magazines, newsletters, newspapers and radio and television programs for evangelical ministry. It is a book that will connect you with other evangelical ministers and workers in Latin America, letting you learn about their realities and challenges, their experiences and lessons, their needs and opportunities. It is a book that will invite you to dialogue and collaborate with them, to support them and to pray for them.

    - -

Do not miss this opportunity to download La Brújula para el Ministro Evangélico and benefit from this extraordinary resource for your ministry.

- -Download La Brujula Para El Ministro Evangelico -

La Brújula para el Ministro Evangélico is a book that every evangelical minister and worker in Latin America should read and keep in their library. It is a book that will orient you, equip you, challenge you, bless you and make you a blessing for evangelical ministry. It is a book that will teach you the principles and practices of evangelical ministry, point you to the best resources for evangelical ministry and connect you with other evangelical ministers and workers across Latin America. It is a book that will help you grow and serve better as an evangelical minister in Latin America. It is a book that will help you be a committed, transformative evangelical minister.

    - -

Do not wait any longer. Download La Brújula para el Ministro Evangélico right now and start enjoying this excellent resource for your ministry. We assure you that you will not regret it.

- -Download La Brujula Para El Ministro Evangelico

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Amazing Frog Download By Apunkagames.md b/spaces/inreVtussa/clothingai/Examples/Amazing Frog Download By Apunkagames.md deleted file mode 100644 index 823712c6d7462df74e7d281c47caac671b87bcb0..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Amazing Frog Download By Apunkagames.md +++ /dev/null @@ -1,10 +0,0 @@ -

    Amazing Frog download by apunkagames


    DOWNLOAD ••• https://tiurll.com/2uCmpm



    -
    -Without a doubt, this is the best game the Super NES has ever seen. POORLY. Sorry, that? Not in this game! HORRIBLE. Any other game after playing ... ►► Nintendo 64 Ultimate All-Stars -Super Mario Bros 2 is a platform game for the Super Nintendo Entertainment System. The game was developed in Japan by Nintendo and released in North America in 1997 by the American company Atari. -Three playable characters can be used in the game - Mario, Luigi and the Three-Headed Goomba. -In Japan, the game was called "Super Mario Bros 2.". -During the game, Mario and Luigi travel across a platform that contains obstacles to overcome. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/inreVtussa/clothingai/Examples/Crack Science 66 Gdmath 9.md b/spaces/inreVtussa/clothingai/Examples/Crack Science 66 Gdmath 9.md deleted file mode 100644 index 24956004cff00dbec8f238c7afcd65258916ddc2..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Crack Science 66 Gdmath 9.md +++ /dev/null @@ -1,16 +0,0 @@ -

    Crack Science 66 Gdmath 9


    Download Zip ✓✓✓ https://tiurll.com/2uCkjy



    - -science 66-gdmath9-crack<br> download science 66-gdmath9. ) Original Article: Science 66 Gdmath 9Factors influencing the quality of discharge summaries in patients admitted to a medical ward. - -Discharge summaries have been shown to improve patient care. However, the factors that influence the quality of discharge summaries remain largely unexplored. In this study, we aimed to evaluate factors associated with the quality of discharge summaries in medical inpatients. A prospective study was conducted at the Kaohsiung Veterans General Hospital, Taiwan. Medical inpatients discharged in 2012 were enrolled. Discharge summaries were assessed for quality by two authors independently using a 10-item quality assessment scale. The quality of discharge summaries was divided into four quality levels according to a score of ≤7, 8-9, 10-11, and ≥12. A total of 286 discharge summaries were analyzed. The mean age of the patients was 67.3 ± 16.8 years; 42.4% were male. The mean score for quality of discharge summaries was 9.1 ± 1.1. The factors influencing the quality of discharge summaries were male sex, older age, admission from the emergency department, and longer length of stay. Male sex, admission from the emergency department, and longer length of stay were independent factors influencing the quality of discharge summaries. It is important for healthcare professionals to recognize factors affecting the quality of discharge summaries so that they can improve the quality of discharge letters.Q: - -A way to remove elements from list while iterating over list? - -I want to remove an element from the list while iterating over the list. Below is the code I have: - - for item in list: - - if item Q: 4fefd39f24
    -
    -
    -

    diff --git a/spaces/insomniac0/Midnight/README.md b/spaces/insomniac0/Midnight/README.md deleted file mode 100644 index e6ee6794cfb23c893143daa60009b40cd5b6cd90..0000000000000000000000000000000000000000 --- a/spaces/insomniac0/Midnight/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Midnight -emoji: 👁 -colorFrom: purple -colorTo: indigo -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/jackyliang42/code-as-policies/robotiq_2f_85/README.md b/spaces/jackyliang42/code-as-policies/robotiq_2f_85/README.md deleted file mode 100644 index 82cbae156ff1b2cdf7e4d0858bb4f3e7092cf7a3..0000000000000000000000000000000000000000 --- a/spaces/jackyliang42/code-as-policies/robotiq_2f_85/README.md +++ /dev/null @@ -1,52 +0,0 @@ -## Robotiq 2F 85 gripper -For this gripper, the following Github repo can be used as a reference: https://github.com/Shreeyak/robotiq.git - -### mimic tag in URDF -This gripper is developed for ROS and uses the `mimic` tag within the URDF files to make the gripper move. From our research `mimic` tag within URDF is not supported by pybullet. To overcome this, one can use the `createConstraint` function. Please refer to [this](https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/mimicJointConstraint.py) example from the bullet3 repo to see how to replicate a `mimic` joint: - -```python -#a mimic joint can act as a gear between two joints -#you can control the gear ratio in magnitude and sign (>0 reverses direction) - -import pybullet as p -import time -p.connect(p.GUI) -p.loadURDF("plane.urdf",0,0,-2) -wheelA = p.loadURDF("differential/diff_ring.urdf",[0,0,0]) -for i in range(p.getNumJoints(wheelA)): - print(p.getJointInfo(wheelA,i)) - p.setJointMotorControl2(wheelA,i,p.VELOCITY_CONTROL,targetVelocity=0,force=0) - - -c = p.createConstraint(wheelA,1,wheelA,3,jointType=p.JOINT_GEAR,jointAxis =[0,1,0],parentFramePosition=[0,0,0],childFramePosition=[0,0,0]) -p.changeConstraint(c,gearRatio=1, maxForce=10000) - -c = p.createConstraint(wheelA,2,wheelA,4,jointType=p.JOINT_GEAR,jointAxis =[0,1,0],parentFramePosition=[0,0,0],childFramePosition=[0,0,0]) -p.changeConstraint(c,gearRatio=-1, maxForce=10000) - -c = p.createConstraint(wheelA,1,wheelA,4,jointType=p.JOINT_GEAR,jointAxis =[0,1,0],parentFramePosition=[0,0,0],childFramePosition=[0,0,0]) -p.changeConstraint(c,gearRatio=-1, maxForce=10000) - - -p.setRealTimeSimulation(1) -while(1): - p.setGravity(0,0,-10) - time.sleep(0.01) -#p.removeConstraint(c) - -``` - - -Details on `createConstraint` can be found in the pybullet [getting started](https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#heading=h.fq749wu22x4c) guide. - -### Files in folder -Since parameters like gear ratio and direction are required, one can find the `robotiq_2f_85_mimic_joints.urdf` which contains the mimic tags as in original URDF, which can be used as a reference. 
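For the 2F-85 itself, a minimal sketch of the same `createConstraint` pattern might look like the following. The joint indices and gear ratios here are assumptions for illustration only; the real values should be read from `robotiq_2f_85_mimic_joints.urdf` rather than taken from this snippet.

```python
import pybullet as p

p.connect(p.DIRECT)
gripper = p.loadURDF("robotiq_2f_85.urdf", useFixedBase=True)

driver_joint = 1      # assumed index of the actuated knuckle joint
mimic_joints = {      # assumed joint index -> gear ratio from the mimic tags
    3: -1.0,
    5: 1.0,
}

for joint, ratio in mimic_joints.items():
    # gear constraint couples each finger joint to the driver joint
    c = p.createConstraint(gripper, driver_joint, gripper, joint,
                           jointType=p.JOINT_GEAR,
                           jointAxis=[0, 1, 0],
                           parentFramePosition=[0, 0, 0],
                           childFramePosition=[0, 0, 0])
    # as noted above, the sign of gearRatio controls the direction
    p.changeConstraint(c, gearRatio=ratio, maxForce=10000)
```
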
It was generated from `robotiq/robotiq_2f_robot/robot/simple_rq2f85_pybullet.urdf.xacro` as so: -``` -rosrun xacro xacro --inorder simple_rq2f85_pybullet.urdf.xacro -adaptive_transmission:="true" > robotiq_2f_85_mimic_joints.urdf -``` - -The URDF meant for use in pybullet is `robotiq_2f_85.urdf` and it is generated in a similar manner as above by running: -``` -rosrun xacro xacro --inorder simple_rq2f85_pybullet.urdf.xacro > robotiq_2f_85.urdf -``` \ No newline at end of file diff --git a/spaces/jbilcke-hf/observer/src/app/observe.tsx b/spaces/jbilcke-hf/observer/src/app/observe.tsx deleted file mode 100644 index a653b1f6272a380ea059460b65cb852f3dbf9231..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/observer/src/app/observe.tsx +++ /dev/null @@ -1,137 +0,0 @@ -"use client" - -import { useCallback, useEffect, useRef, useState, useTransition } from "react" -import { useInterval } from "usehooks-ts" -import Webcam from "react-webcam" -import AutoSizer from "react-virtualized-auto-sizer" - -import { see } from "./engine/see" -import { Progress } from "./interface/progress" - -export function Observe({ - onObserve, -}: { - onObserve: (observation: string, image: string) => void -}) { - const [_isPending, startTransition] = useTransition() - const [img, setImg] = useState("") - const webcamRef = useRef(null) - const [isInitialized, setInitialized] = useState(false) - const [frameNumber, setFrameNumber] = useState(0) - const [isBusy, setBusy] = useState(false) - const [lastObservation, setLastObservation] = useState("Nothing to see yet.") - const [lastObservedAt, setLastObservedAt] = useState(Date.now()) - - const defaultWidth = 1280 - const defaultHeight = 1024 // 720 - - // minimum wait time between calls - const minimumWaitTimeInSec = 10 - - // in case we need to record a video, check the last part of - // https://blog.openreplay.com/capture-real-time-images-and-videos-with-react-webcam/ - const capture = useCallback(() => { - if (!webcamRef.current) { return } - const imageSrc = webcamRef.current.getScreenshot() - if (!imageSrc) { return } - setImg(imageSrc) - setFrameNumber(frameNumber + 1) - - return imageSrc - }, [webcamRef]) - - // note: for some strange reason, the webcam (at least on macOS) - // has a "fade in effect", which means in the first few seconds, - // eg. 
if we capture at 800ms, if will be darker than normal - - useEffect(() => { - if (webcamRef.current && img && !isInitialized) { - setInitialized(true) - } - }, [webcamRef.current, img, isInitialized]) - - const observe = () => { - if (isBusy) { - // console.log("we are already predicting: skippping turn") - return - } - - const currentTimeInMs = Date.now() - const elapsedTimeInMs = currentTimeInMs - lastObservedAt - const elapsedTimeInSec = elapsedTimeInMs / 1000 - if (elapsedTimeInSec < minimumWaitTimeInSec) { - // console.log("minimum wait time between calls not reached: skipping turn") - return - } - - setBusy(true) - - // console.log("Capturing new frame from webcam..") - - startTransition(async () => { - const imageBase64 = capture() - if (!imageBase64) { - console.log("Failed to capture a new frame") - setTimeout(() => { - setBusy(false) - setLastObservedAt(Date.now()) - }, 2000) - return - } - const prompt = `What do you see here?` - - console.log("JULIAN: disabled watch") - - - // console.log("Calling IDEFICS..") - const newObservation = await see({ prompt, imageBase64 }) - - // console.log("New observation: ", newObservation) - if (newObservation && newObservation !== lastObservation) { - // console.log("update!") - setLastObservation(newObservation || "") - onObserve(newObservation || "", imageBase64) - } - setLastObservedAt(Date.now()) - - // comment to disable the infinite loop! - setBusy(false) - }) - - // console.log("observation ended!") - } - - useInterval(() => { - observe() - }, 1000) - - return ( - - {({ height, width }) => ( - <> - - - - )} - - ) -} diff --git a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/__init__.py b/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/__init__.py deleted file mode 100644 index 970e2c8ce7f90afab089bf84e249af5ee7124951..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/data/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Copyright (c) Meta Platforms, Inc. All Rights Reserved - -from .dataset_mappers import * -from . 
import datasets -from .build import ( - build_detection_train_loader, - build_detection_test_loader, -) diff --git a/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/ema.py b/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/ema.py deleted file mode 100644 index c8c75af43565f6e140287644aaaefa97dd6e67c5..0000000000000000000000000000000000000000 --- a/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/ema.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -from torch import nn - - -class LitEma(nn.Module): - def __init__(self, model, decay=0.9999, use_num_upates=True): - super().__init__() - if decay < 0.0 or decay > 1.0: - raise ValueError('Decay must be between 0 and 1') - - self.m_name2s_name = {} - self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) - self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates - else torch.tensor(-1,dtype=torch.int)) - - for name, p in model.named_parameters(): - if p.requires_grad: - #remove as '.'-character is not allowed in buffers - s_name = name.replace('.','') - self.m_name2s_name.update({name:s_name}) - self.register_buffer(s_name,p.clone().detach().data) - - self.collected_params = [] - - def forward(self,model): - decay = self.decay - - if self.num_updates >= 0: - self.num_updates += 1 - decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) - - one_minus_decay = 1.0 - decay - - with torch.no_grad(): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - - for key in m_param: - if m_param[key].requires_grad: - sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) - shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) - else: - assert not key in self.m_name2s_name - - def copy_to(self, model): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - for key in m_param: - if m_param[key].requires_grad: - m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) - else: - assert not key in self.m_name2s_name - - def store(self, parameters): - """ - Save the current parameters for restoring later. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. - """ - self.collected_params = [param.clone() for param in parameters] - - def restore(self, parameters): - """ - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. After validation (or model saving), use this to - restore the former parameters. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. 
- """ - for c_param, param in zip(self.collected_params, parameters): - param.data.copy_(c_param.data) diff --git a/spaces/jharrison27/VR-DEMO/style.css b/spaces/jharrison27/VR-DEMO/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/jharrison27/VR-DEMO/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/jie1/jie_test4/login.py b/spaces/jie1/jie_test4/login.py deleted file mode 100644 index e70f195411ce4e2fe394a948223e39702dca9ce6..0000000000000000000000000000000000000000 --- a/spaces/jie1/jie_test4/login.py +++ /dev/null @@ -1,147 +0,0 @@ -import tkinter as tk -import tkinter.messagebox -import pickle -import os - -from PIL import Image, ImageTk - - -class Login: - window = None - right = 0 - - # 登录函数 - def usr_log_in(self): - # 输入框获取用户名密码 - usr_name = self.var_usr_name.get() - usr_pwd = self.var_usr_pwd.get() - # 从本地字典获取用户信息,如果没有则新建本地数据库 - try: - with open('usr_info.pickle', 'rb') as usr_file: - usrs_info = pickle.load(usr_file) - except FileNotFoundError: - with open('usr_info.pickle', 'wb') as usr_file: - usrs_info = {'admin': 'admin'} - pickle.dump(usrs_info, usr_file) - # 判断用户名和密码是否匹配 - if usr_name in usrs_info: - if usr_pwd == usrs_info[usr_name]: - tk.messagebox.showinfo(title='welcome', message='欢迎您:' + usr_name) - self.window.destroy() - self.right = True - else: - tk.messagebox.showerror(message='密码错误') - # 用户名密码不能为空 - elif usr_name == '' or usr_pwd == '': - tk.messagebox.showerror(message='用户名或密码为空') - # 不在数据库中弹出是否注册的框 - else: - is_signup = tk.messagebox.askyesno('欢迎', '您还没有注册,是否现在注册') - if is_signup: - self.usr_sign_up() - - # 注册函数 - def usr_sign_up(self): - # 确认注册时的相应函数 - def signtowcg(): - # 获取输入框内的内容 - nn = new_name.get() - np = new_pwd.get() - npf = new_pwd_confirm.get() - - # 本地加载已有用户信息,如果没有则已有用户信息为空 - try: - with open('usr_info.pickle', 'rb') as usr_file: - exist_usr_info = pickle.load(usr_file) - except FileNotFoundError: - exist_usr_info = {} - - # 检查用户名存在、密码为空、密码前后不一致 - if nn in exist_usr_info: - tk.messagebox.showerror('错误', '用户名已存在') - elif np == '' or nn == '': - tk.messagebox.showerror('错误', '用户名或密码为空') - elif np != npf: - tk.messagebox.showerror('错误', '密码前后不一致') - # 注册信息没有问题则将用户名密码写入数据库 - else: - exist_usr_info[nn] = np - with open('usr_info.pickle', 'wb') as usr_file: - pickle.dump(exist_usr_info, usr_file) - tk.messagebox.showinfo('欢迎', '注册成功') - # 注册成功关闭注册框 - window_sign_up.destroy() - - # 新建注册界面 - window_sign_up = tk.Toplevel(self.window) - window_sign_up.geometry('350x200') - window_sign_up.title('注册') - # 用户名变量及标签、输入框 - new_name = tk.StringVar() - tk.Label(window_sign_up, text='用户名:').place(x=10, y=10) - tk.Entry(window_sign_up, textvariable=new_name).place(x=150, y=10) - # 密码变量及标签、输入框 - new_pwd = tk.StringVar() - tk.Label(window_sign_up, text='请输入密码:').place(x=10, y=50) - tk.Entry(window_sign_up, textvariable=new_pwd, show='*').place(x=150, y=50) - # 重复密码变量及标签、输入框 - new_pwd_confirm = tk.StringVar() - tk.Label(window_sign_up, text='请再次输入密码:').place(x=10, y=90) - tk.Entry(window_sign_up, textvariable=new_pwd_confirm, show='*').place(x=150, y=90) - # 
确认注册按钮及位置 - bt_confirm_sign_up = tk.Button(window_sign_up, text='确认注册', - command=signtowcg) - bt_confirm_sign_up.place(x=150, y=130) - - # 退出的函数 - def usr_sign_quit(self): - self.window.destroy() - self.right = False - - def __init__(self): - # 窗口 - self.window = tk.Tk() - self.window.title('花卉识别系统') - self.window.geometry('450x300') - self.topWidth = 450 - self.topheight = 300 - - # 窗口居中 - screenwidth = self.window.winfo_screenwidth() - screenheight = self.window.winfo_screenheight() - alignstr = '%dx%d+%d+%d' % ( - self.topWidth, self.topheight, (screenwidth - self.topWidth) / 2, (screenheight - self.topheight) / 2) - self.window.geometry(alignstr) - ima = Image.open(os.path.dirname(os.path.realpath(__file__)) + '/bg.jpg').resize((500, 300)) - ima = ImageTk.PhotoImage(ima) - - # 画布放置图片 - canvas = tk.Canvas(self.window, height=300, width=500, ) - image = canvas.create_image(0, 0, anchor='nw', image=ima) - canvas.pack(side='top') - - # 标签 用户名密码 - tk.Label(self.window, text='用户名:').place(x=100, y=150) - tk.Label(self.window, text='密码:').place(x=100, y=190) - - # 用户名输入框 - self.var_usr_name = tk.StringVar() - self.entry_usr_name = tk.Entry(self.window, textvariable=self.var_usr_name) - self.entry_usr_name.place(x=160, y=150) - # self.entry_usr_name.place(x=140, y=230) - - # 密码输入框 - self.var_usr_pwd = tk.StringVar() - self.entry_usr_pwd = tk.Entry(self.window, textvariable=self.var_usr_pwd, show='*') - self.entry_usr_pwd.place(x=160, y=190) - - # 登录 注册按钮 - self.bt_login = tk.Button(self.window, text='登录', command=self.usr_log_in) - self.bt_login.place(x=140, y=230) - self.bt_logup = tk.Button(self.window, text='注册', command=self.usr_sign_up) - self.bt_logup.place(x=210, y=230) - self.bt_logquit = tk.Button(self.window, text='退出', command=self.usr_sign_quit) - self.bt_logquit.place(x=280, y=230) - # 主循环 - self.window.mainloop() - diff --git a/spaces/jiejiejie0420/bingo/src/components/ui/voice/index.tsx b/spaces/jiejiejie0420/bingo/src/components/ui/voice/index.tsx deleted file mode 100644 index 4adcb632226bfced8b97092782811edf08b56569..0000000000000000000000000000000000000000 --- a/spaces/jiejiejie0420/bingo/src/components/ui/voice/index.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import './index.scss' - -export interface VoiceProps extends CSSPropertyRule { - num?: number; - duration?: number; -} -export default function Voice({ duration = 400, num = 7, ...others }) { - return ( -
    - {Array.from({ length: num }).map((_, index) => { - const randomDuration = Math.random() * 100 + duration - const initialDelay = Math.random() * 2 * duration - const initialScale = Math.sin((index + 1) * Math.PI / num) - return ( -
    - ) - })} -
    - ) -} diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/to_process.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/to_process.py deleted file mode 100644 index 7ba9d44198233b94bea1b01c6135416170eac925..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/anyio/to_process.py +++ /dev/null @@ -1,249 +0,0 @@ -from __future__ import annotations - -import os -import pickle -import subprocess -import sys -from collections import deque -from importlib.util import module_from_spec, spec_from_file_location -from typing import Callable, TypeVar, cast - -from ._core._eventloop import current_time, get_asynclib, get_cancelled_exc_class -from ._core._exceptions import BrokenWorkerProcess -from ._core._subprocesses import open_process -from ._core._synchronization import CapacityLimiter -from ._core._tasks import CancelScope, fail_after -from .abc import ByteReceiveStream, ByteSendStream, Process -from .lowlevel import RunVar, checkpoint_if_cancelled -from .streams.buffered import BufferedByteReceiveStream - -WORKER_MAX_IDLE_TIME = 300 # 5 minutes - -T_Retval = TypeVar("T_Retval") -_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers") -_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar( - "_process_pool_idle_workers" -) -_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter") - - -async def run_sync( - func: Callable[..., T_Retval], - *args: object, - cancellable: bool = False, - limiter: CapacityLimiter | None = None, -) -> T_Retval: - """ - Call the given function with the given arguments in a worker process. - - If the ``cancellable`` option is enabled and the task waiting for its completion is cancelled, - the worker process running it will be abruptly terminated using SIGKILL (or - ``terminateProcess()`` on Windows). - - :param func: a callable - :param args: positional arguments for the callable - :param cancellable: ``True`` to allow cancellation of the operation while it's running - :param limiter: capacity limiter to use to limit the total amount of processes running - (if omitted, the default limiter is used) - :return: an awaitable that yields the return value of the function. 
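    A hedged usage sketch (this assumes the module is importable as ``anyio.to_process``;
    ``fib`` is a made-up, module-level example function, not part of the library)::

        import anyio
        from anyio import to_process

        def fib(n: int) -> int:
            return n if n < 2 else fib(n - 1) + fib(n - 2)

        async def main() -> None:
            # runs the CPU-bound call in a worker process instead of the event loop
            print(await to_process.run_sync(fib, 30))

        anyio.run(main)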
- - """ - - async def send_raw_command(pickled_cmd: bytes) -> object: - try: - await stdin.send(pickled_cmd) - response = await buffered.receive_until(b"\n", 50) - status, length = response.split(b" ") - if status not in (b"RETURN", b"EXCEPTION"): - raise RuntimeError( - f"Worker process returned unexpected response: {response!r}" - ) - - pickled_response = await buffered.receive_exactly(int(length)) - except BaseException as exc: - workers.discard(process) - try: - process.kill() - with CancelScope(shield=True): - await process.aclose() - except ProcessLookupError: - pass - - if isinstance(exc, get_cancelled_exc_class()): - raise - else: - raise BrokenWorkerProcess from exc - - retval = pickle.loads(pickled_response) - if status == b"EXCEPTION": - assert isinstance(retval, BaseException) - raise retval - else: - return retval - - # First pickle the request before trying to reserve a worker process - await checkpoint_if_cancelled() - request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL) - - # If this is the first run in this event loop thread, set up the necessary variables - try: - workers = _process_pool_workers.get() - idle_workers = _process_pool_idle_workers.get() - except LookupError: - workers = set() - idle_workers = deque() - _process_pool_workers.set(workers) - _process_pool_idle_workers.set(idle_workers) - get_asynclib().setup_process_pool_exit_at_shutdown(workers) - - async with (limiter or current_default_process_limiter()): - # Pop processes from the pool (starting from the most recently used) until we find one that - # hasn't exited yet - process: Process - while idle_workers: - process, idle_since = idle_workers.pop() - if process.returncode is None: - stdin = cast(ByteSendStream, process.stdin) - buffered = BufferedByteReceiveStream( - cast(ByteReceiveStream, process.stdout) - ) - - # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME seconds or - # longer - now = current_time() - killed_processes: list[Process] = [] - while idle_workers: - if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME: - break - - process, idle_since = idle_workers.popleft() - process.kill() - workers.remove(process) - killed_processes.append(process) - - with CancelScope(shield=True): - for process in killed_processes: - await process.aclose() - - break - - workers.remove(process) - else: - command = [sys.executable, "-u", "-m", __name__] - process = await open_process( - command, stdin=subprocess.PIPE, stdout=subprocess.PIPE - ) - try: - stdin = cast(ByteSendStream, process.stdin) - buffered = BufferedByteReceiveStream( - cast(ByteReceiveStream, process.stdout) - ) - with fail_after(20): - message = await buffered.receive(6) - - if message != b"READY\n": - raise BrokenWorkerProcess( - f"Worker process returned unexpected response: {message!r}" - ) - - main_module_path = getattr(sys.modules["__main__"], "__file__", None) - pickled = pickle.dumps( - ("init", sys.path, main_module_path), - protocol=pickle.HIGHEST_PROTOCOL, - ) - await send_raw_command(pickled) - except (BrokenWorkerProcess, get_cancelled_exc_class()): - raise - except BaseException as exc: - process.kill() - raise BrokenWorkerProcess( - "Error during worker process initialization" - ) from exc - - workers.add(process) - - with CancelScope(shield=not cancellable): - try: - return cast(T_Retval, await send_raw_command(request)) - finally: - if process in workers: - idle_workers.append((process, current_time())) - - -def current_default_process_limiter() -> CapacityLimiter: - """ - Return the 
capacity limiter that is used by default to limit the number of worker processes. - - :return: a capacity limiter object - - """ - try: - return _default_process_limiter.get() - except LookupError: - limiter = CapacityLimiter(os.cpu_count() or 2) - _default_process_limiter.set(limiter) - return limiter - - -def process_worker() -> None: - # Redirect standard streams to os.devnull so that user code won't interfere with the - # parent-worker communication - stdin = sys.stdin - stdout = sys.stdout - sys.stdin = open(os.devnull) - sys.stdout = open(os.devnull, "w") - - stdout.buffer.write(b"READY\n") - while True: - retval = exception = None - try: - command, *args = pickle.load(stdin.buffer) - except EOFError: - return - except BaseException as exc: - exception = exc - else: - if command == "run": - func, args = args - try: - retval = func(*args) - except BaseException as exc: - exception = exc - elif command == "init": - main_module_path: str | None - sys.path, main_module_path = args - del sys.modules["__main__"] - if main_module_path: - # Load the parent's main module but as __mp_main__ instead of __main__ - # (like multiprocessing does) to avoid infinite recursion - try: - spec = spec_from_file_location("__mp_main__", main_module_path) - if spec and spec.loader: - main = module_from_spec(spec) - spec.loader.exec_module(main) - sys.modules["__main__"] = main - except BaseException as exc: - exception = exc - - try: - if exception is not None: - status = b"EXCEPTION" - pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL) - else: - status = b"RETURN" - pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL) - except BaseException as exc: - exception = exc - status = b"EXCEPTION" - pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL) - - stdout.buffer.write(b"%s %d\n" % (status, len(pickled))) - stdout.buffer.write(pickled) - - # Respect SIGTERM - if isinstance(exception, SystemExit): - raise exception - - -if __name__ == "__main__": - process_worker() diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/security/api_key.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/security/api_key.py deleted file mode 100644 index 8b2c5c08059fc6911ceb34efdff8f5bf80df469f..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fastapi/security/api_key.py +++ /dev/null @@ -1,98 +0,0 @@ -from typing import Optional - -from fastapi.openapi.models import APIKey, APIKeyIn -from fastapi.security.base import SecurityBase -from starlette.exceptions import HTTPException -from starlette.requests import Request -from starlette.status import HTTP_403_FORBIDDEN - - -class APIKeyBase(SecurityBase): - pass - - -class APIKeyQuery(APIKeyBase): - def __init__( - self, - *, - name: str, - scheme_name: Optional[str] = None, - description: Optional[str] = None, - auto_error: bool = True, - ): - self.model: APIKey = APIKey( - **{"in": APIKeyIn.query}, # type: ignore[arg-type] - name=name, - description=description, - ) - self.scheme_name = scheme_name or self.__class__.__name__ - self.auto_error = auto_error - - async def __call__(self, request: Request) -> Optional[str]: - api_key = request.query_params.get(self.model.name) - if not api_key: - if self.auto_error: - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" - ) - else: - return None - return api_key - - -class APIKeyHeader(APIKeyBase): - def __init__( - self, - *, - name: str, - scheme_name: Optional[str] 
= None, - description: Optional[str] = None, - auto_error: bool = True, - ): - self.model: APIKey = APIKey( - **{"in": APIKeyIn.header}, # type: ignore[arg-type] - name=name, - description=description, - ) - self.scheme_name = scheme_name or self.__class__.__name__ - self.auto_error = auto_error - - async def __call__(self, request: Request) -> Optional[str]: - api_key = request.headers.get(self.model.name) - if not api_key: - if self.auto_error: - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" - ) - else: - return None - return api_key - - -class APIKeyCookie(APIKeyBase): - def __init__( - self, - *, - name: str, - scheme_name: Optional[str] = None, - description: Optional[str] = None, - auto_error: bool = True, - ): - self.model: APIKey = APIKey( - **{"in": APIKeyIn.cookie}, # type: ignore[arg-type] - name=name, - description=description, - ) - self.scheme_name = scheme_name or self.__class__.__name__ - self.auto_error = auto_error - - async def __call__(self, request: Request) -> Optional[str]: - api_key = request.cookies.get(self.model.name) - if not api_key: - if self.auto_error: - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" - ) - else: - return None - return api_key diff --git a/spaces/johnrobinsn/MidasDepthEstimation/depth_viewer.py b/spaces/johnrobinsn/MidasDepthEstimation/depth_viewer.py deleted file mode 100644 index 13edd09991c0ad3f263c46ebc6ab99e81dff6082..0000000000000000000000000000000000000000 --- a/spaces/johnrobinsn/MidasDepthEstimation/depth_viewer.py +++ /dev/null @@ -1,156 +0,0 @@ -import cv2 -import base64 -import numpy as np - -_viewer_html = ''' - - - - - - - - - - - - - - - -
    - -
    - - - -''' - -image_url_marker = '{{{image_url_marker}}}' -depth_url_marker = '{{{depth_url_marker}}}' - -def depthviewer2html(image,depth): - image_rgb = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB) - _, buffer = cv2.imencode('.jpg',image_rgb) - image_data_url = 'data:image/jpeg;base64,'+base64.b64encode(buffer).decode('utf-8') - _, buffer = cv2.imencode('.png',np.array(depth)) - mask_data_url = 'data:image/png;base64,'+base64.b64encode(buffer).decode('utf-8') - vhtml = str(_viewer_html).replace(image_url_marker,image_data_url).replace(depth_url_marker,mask_data_url) - e = base64.b64encode(bytes(vhtml,'utf-8')).decode('utf-8') - url = f'data:text/html;base64,{e}' - h = f'' - return h \ No newline at end of file diff --git a/spaces/jpfearnworks/ai_agents/modules/vector_stores/embedding/instructorxl.py b/spaces/jpfearnworks/ai_agents/modules/vector_stores/embedding/instructorxl.py deleted file mode 100644 index c58fcb3cbca8b2271be6e270ed89585c5ea2745e..0000000000000000000000000000000000000000 --- a/spaces/jpfearnworks/ai_agents/modules/vector_stores/embedding/instructorxl.py +++ /dev/null @@ -1,7 +0,0 @@ -from langchain.embeddings import HuggingFaceInstructEmbeddings - - -def get_default_instructor_embedding(): - instructor_embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl", - model_kwargs={"device": "cuda"}) - return instructor_embeddings \ No newline at end of file diff --git a/spaces/jyseo/3DFuse/my3d.py b/spaces/jyseo/3DFuse/my3d.py deleted file mode 100644 index eafc5e7e104c7ccb70c38fbb3df7cba8dcf64105..0000000000000000000000000000000000000000 --- a/spaces/jyseo/3DFuse/my3d.py +++ /dev/null @@ -1,161 +0,0 @@ -# some tools developed for the vision class -import numpy as np -from numpy import cross, tan -from numpy.linalg import norm, inv - - - -def normalize(v): - return v / norm(v) - - -def camera_pose(eye, front, up): - z = normalize(-1 * front) - x = normalize(cross(up, z)) - y = normalize(cross(z, x)) - - # convert to col vector - x = x.reshape(-1, 1) - y = y.reshape(-1, 1) - z = z.reshape(-1, 1) - eye = eye.reshape(-1, 1) - - pose = np.block([ - [x, y, z, eye], - [0, 0, 0, 1] - ]) - return pose - - -def compute_extrinsics(eye, front, up): - pose = camera_pose(eye, front, up) - world_2_cam = inv(pose) - return world_2_cam - - -def compute_intrinsics(aspect_ratio, fov, img_height_in_pix): - # aspect ratio is w / h - ndc = compute_proj_to_normalized(aspect_ratio, fov) - - # anything beyond [-1, 1] should be discarded - # this did not mention how to do z-clipping; - - ndc_to_img = compute_normalized_to_img_trans(aspect_ratio, img_height_in_pix) - intrinsic = ndc_to_img @ ndc - return intrinsic - - -def compute_proj_to_normalized(aspect, fov): - # compared to standard OpenGL NDC intrinsic, - # this skips the 3rd row treatment on z. 
hence the name partial_ndc - fov_in_rad = fov / 180 * np.pi - t = tan(fov_in_rad / 2) # tan half fov - partial_ndc_intrinsic = np.array([ - [1 / (t * aspect), 0, 0, 0], - [0, 1 / t, 0, 0], - [0, 0, -1, 0] # copy the negative distance for division - ]) - return partial_ndc_intrinsic - - -def compute_normalized_to_img_trans(aspect, img_height_in_pix): - img_h = img_height_in_pix - img_w = img_height_in_pix * aspect - - # note the OpenGL convention that (0, 0) sits at the center of the pixel; - # hence the extra -0.5 translation - # this is useful when you shoot rays through a pixel to the scene - ndc_to_img = np.array([ - [img_w / 2, 0, img_w / 2 - 0.5], - [0, img_h / 2, img_h / 2 - 0.5], - [0, 0, 1] - ]) - - img_y_coord_flip = np.array([ - [1, 0, 0], - [0, -1, img_h - 1], # note the -1 - [0, 0, 1] - ]) - - # the product of the above 2 matrices is equivalent to adding - # - sign to the (1, 1) entry - # you could have simply written - # ndc_to_img = np.array([ - # [img_w / 2, 0, img_w / 2 - 0.5], - # [0, -img_h / 2, img_h / 2 - 0.5], - # [0, 0, 1] - # ]) - - ndc_to_img = img_y_coord_flip @ ndc_to_img - return ndc_to_img - - -def unproject(K, pixel_coords, depth=1.0): - """sometimes also referred to as backproject - pixel_coords: [n, 2] pixel locations - depth: [n,] or [,] depth value. of a shape that is broadcastable with pix coords - """ - K = K[0:3, 0:3] - - pixel_coords = as_homogeneous(pixel_coords) - pixel_coords = pixel_coords.T # [2+1, n], so that mat mult is on the left - - # this will give points with z = -1, which is exactly what you want since - # your camera is facing the -ve z axis - pts = inv(K) @ pixel_coords - - pts = pts * depth # [3, n] * [n,] broadcast - pts = pts.T - pts = as_homogeneous(pts) - return pts - - -""" -these two functions are changed so that they can handle arbitrary number of -dimensions >=1 -""" - - -def homogenize(pts): - # pts: [..., d], where last dim of the d is the diviser - *front, d = pts.shape - pts = pts / pts[..., -1].reshape(*front, 1) - return pts - - -def as_homogeneous(pts, lib=np): - # pts: [..., d] - *front, d = pts.shape - points = lib.ones((*front, d + 1)) - points[..., :d] = pts - return points - - -def simple_point_render(pts, img_w, img_h, fov, eye, front, up): - """ - pts: [N, 3] - """ - canvas = np.ones((img_h, img_w, 3)) - - pts = as_homogeneous(pts) - - E = compute_extrinsics(eye, front, up) - world_2_ndc = compute_proj_to_normalized(img_w / img_h, fov) - ndc_to_img = compute_normalized_to_img_trans(img_w / img_h, img_h) - - pts = pts @ E.T - pts = pts @ world_2_ndc.T - pts = homogenize(pts) - - # now filter out outliers beyond [-1, 1] - outlier_mask = (np.abs(pts) > 1.0).any(axis=1) - pts = pts[~outlier_mask] - - pts = pts @ ndc_to_img.T - - # now draw each point - pts = np.rint(pts).astype(np.int32) - xs, ys, _ = pts.T - canvas[ys, xs] = (1, 0, 0) - - return canvas diff --git a/spaces/kai0226/hotdog-detection/README.md b/spaces/kai0226/hotdog-detection/README.md deleted file mode 100644 index 87006a67add6db183363b9ae340000352d4b1831..0000000000000000000000000000000000000000 --- a/spaces/kai0226/hotdog-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Hotdog Detection -emoji: 😻 -colorFrom: blue -colorTo: pink -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kausmos/clothsy/model/README.md b/spaces/kausmos/clothsy/model/README.md deleted file mode 100644 index 
9345a404b1c759ba192be71bfe32bc304f7a94e7..0000000000000000000000000000000000000000 --- a/spaces/kausmos/clothsy/model/README.md +++ /dev/null @@ -1,176 +0,0 @@ ---- -pipeline_tag: sentence-similarity -tags: -- sentence-transformers -- feature-extraction -- sentence-similarity -language: en -license: apache-2.0 -datasets: -- s2orc -- flax-sentence-embeddings/stackexchange_xml -- MS Marco -- gooaq -- yahoo_answers_topics -- code_search_net -- search_qa -- eli5 -- snli -- multi_nli -- wikihow -- natural_questions -- trivia_qa -- embedding-data/sentence-compression -- embedding-data/flickr30k-captions -- embedding-data/altlex -- embedding-data/simple-wiki -- embedding-data/QQP -- embedding-data/SPECTER -- embedding-data/PAQ_pairs -- embedding-data/WikiAnswers - ---- - - -# all-MiniLM-L6-v2 -This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. - -## Usage (Sentence-Transformers) -Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: - -``` -pip install -U sentence-transformers -``` - -Then you can use the model like this: -```python -from sentence_transformers import SentenceTransformer -sentences = ["This is an example sentence", "Each sentence is converted"] - -model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2') -embeddings = model.encode(sentences) -print(embeddings) -``` - -## Usage (HuggingFace Transformers) -Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. - -```python -from transformers import AutoTokenizer, AutoModel -import torch -import torch.nn.functional as F - -#Mean Pooling - Take attention mask into account for correct averaging -def mean_pooling(model_output, attention_mask): - token_embeddings = model_output[0] #First element of model_output contains all token embeddings - input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() - return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) - - -# Sentences we want sentence embeddings for -sentences = ['This is an example sentence', 'Each sentence is converted'] - -# Load model from HuggingFace Hub -tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2') -model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2') - -# Tokenize sentences -encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') - -# Compute token embeddings -with torch.no_grad(): - model_output = model(**encoded_input) - -# Perform pooling -sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) - -# Normalize embeddings -sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1) - -print("Sentence embeddings:") -print(sentence_embeddings) -``` - -## Evaluation Results - -For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/all-MiniLM-L6-v2) - ------- - -## Background - -The project aims to train sentence embedding models on very large sentence level datasets using a self-supervised -contrastive learning objective. 
We used the pretrained [`nreimers/MiniLM-L6-H384-uncased`](https://huggingface.co/nreimers/MiniLM-L6-H384-uncased) model and fine-tuned in on a -1B sentence pairs dataset. We use a contrastive learning objective: given a sentence from the pair, the model should predict which out of a set of randomly sampled other sentences, was actually paired with it in our dataset. - -We developped this model during the -[Community week using JAX/Flax for NLP & CV](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104), -organized by Hugging Face. We developped this model as part of the project: -[Train the Best Sentence Embedding Model Ever with 1B Training Pairs](https://discuss.huggingface.co/t/train-the-best-sentence-embedding-model-ever-with-1b-training-pairs/7354). We benefited from efficient hardware infrastructure to run the project: 7 TPUs v3-8, as well as intervention from Googles Flax, JAX, and Cloud team member about efficient deep learning frameworks. - -## Intended uses - -Our model is intented to be used as a sentence and short paragraph encoder. Given an input text, it ouptuts a vector which captures -the semantic information. The sentence vector may be used for information retrieval, clustering or sentence similarity tasks. - -By default, input text longer than 256 word pieces is truncated. - - -## Training procedure - -### Pre-training - -We use the pretrained [`nreimers/MiniLM-L6-H384-uncased`](https://huggingface.co/nreimers/MiniLM-L6-H384-uncased) model. Please refer to the model card for more detailed information about the pre-training procedure. - -### Fine-tuning - -We fine-tune the model using a contrastive objective. Formally, we compute the cosine similarity from each possible sentence pairs from the batch. -We then apply the cross entropy loss by comparing with true pairs. - -#### Hyper parameters - -We trained ou model on a TPU v3-8. We train the model during 100k steps using a batch size of 1024 (128 per TPU core). -We use a learning rate warm up of 500. The sequence length was limited to 128 tokens. We used the AdamW optimizer with -a 2e-5 learning rate. The full training script is accessible in this current repository: `train_script.py`. - -#### Training data - -We use the concatenation from multiple datasets to fine-tune our model. The total number of sentence pairs is above 1 billion sentences. -We sampled each dataset given a weighted probability which configuration is detailed in the `data_config.json` file. 
- - -| Dataset | Paper | Number of training tuples | -|--------------------------------------------------------|:----------------------------------------:|:--------------------------:| -| [Reddit comments (2015-2018)](https://github.com/PolyAI-LDN/conversational-datasets/tree/master/reddit) | [paper](https://arxiv.org/abs/1904.06472) | 726,484,430 | -| [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Abstracts) | [paper](https://aclanthology.org/2020.acl-main.447/) | 116,288,806 | -| [WikiAnswers](https://github.com/afader/oqa#wikianswers-corpus) Duplicate question pairs | [paper](https://doi.org/10.1145/2623330.2623677) | 77,427,422 | -| [PAQ](https://github.com/facebookresearch/PAQ) (Question, Answer) pairs | [paper](https://arxiv.org/abs/2102.07033) | 64,371,441 | -| [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Titles) | [paper](https://aclanthology.org/2020.acl-main.447/) | 52,603,982 | -| [S2ORC](https://github.com/allenai/s2orc) (Title, Abstract) | [paper](https://aclanthology.org/2020.acl-main.447/) | 41,769,185 | -| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title, Body) pairs | - | 25,316,456 | -| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title+Body, Answer) pairs | - | 21,396,559 | -| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title, Answer) pairs | - | 21,396,559 | -| [MS MARCO](https://microsoft.github.io/msmarco/) triplets | [paper](https://doi.org/10.1145/3404835.3462804) | 9,144,553 | -| [GOOAQ: Open Question Answering with Diverse Answer Types](https://github.com/allenai/gooaq) | [paper](https://arxiv.org/pdf/2104.08727.pdf) | 3,012,496 | -| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 1,198,260 | -| [Code Search](https://huggingface.co/datasets/code_search_net) | - | 1,151,414 | -| [COCO](https://cocodataset.org/#home) Image captions | [paper](https://link.springer.com/chapter/10.1007%2F978-3-319-10602-1_48) | 828,395| -| [SPECTER](https://github.com/allenai/specter) citation triplets | [paper](https://doi.org/10.18653/v1/2020.acl-main.207) | 684,100 | -| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Question, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 681,164 | -| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Question) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 659,896 | -| [SearchQA](https://huggingface.co/datasets/search_qa) | [paper](https://arxiv.org/abs/1704.05179) | 582,261 | -| [Eli5](https://huggingface.co/datasets/eli5) | [paper](https://doi.org/10.18653/v1/p19-1346) | 325,475 | -| [Flickr 30k](https://shannon.cs.illinois.edu/DenotationGraph/) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/229/33) | 317,695 | -| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (titles) | | 304,525 | -| AllNLI ([SNLI](https://nlp.stanford.edu/projects/snli/) and [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/) | [paper SNLI](https://doi.org/10.18653/v1/d15-1075), [paper MultiNLI](https://doi.org/10.18653/v1/n18-1101) | 277,230 | -| [Stack 
Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (bodies) | | 250,519 | -| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (titles+bodies) | | 250,460 | -| [Sentence Compression](https://github.com/google-research-datasets/sentence-compression) | [paper](https://www.aclweb.org/anthology/D13-1155/) | 180,000 | -| [Wikihow](https://github.com/pvl/wikihow_pairs_dataset) | [paper](https://arxiv.org/abs/1810.09305) | 128,542 | -| [Altlex](https://github.com/chridey/altlex/) | [paper](https://aclanthology.org/P16-1135.pdf) | 112,696 | -| [Quora Question Triplets](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) | - | 103,663 | -| [Simple Wikipedia](https://cs.pomona.edu/~dkauchak/simplification/) | [paper](https://www.aclweb.org/anthology/P11-2117/) | 102,225 | -| [Natural Questions (NQ)](https://ai.google.com/research/NaturalQuestions) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/1455) | 100,231 | -| [SQuAD2.0](https://rajpurkar.github.io/SQuAD-explorer/) | [paper](https://aclanthology.org/P18-2124.pdf) | 87,599 | -| [TriviaQA](https://huggingface.co/datasets/trivia_qa) | - | 73,346 | -| **Total** | | **1,170,060,424** | \ No newline at end of file diff --git a/spaces/kcagle/AutoGPT/Dockerfile b/spaces/kcagle/AutoGPT/Dockerfile deleted file mode 100644 index 8396154998f32a50d55c199a674b638d5cf7bda2..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Use an official Python base image from the Docker Hub -FROM python:3.10-slim - -# Install git -RUN apt-get -y update -RUN apt-get -y install git chromium-driver - -# Install Xvfb and other dependencies for headless browser testing -RUN apt-get update \ - && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates - -# Install Firefox / Chromium -RUN wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \ - && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ - && apt-get update \ - && apt-get install -y chromium firefox-esr - -# Set environment variables -ENV PIP_NO_CACHE_DIR=yes \ - PYTHONUNBUFFERED=1 \ - PYTHONDONTWRITEBYTECODE=1 - -# Create a non-root user and set permissions -RUN useradd --create-home appuser -WORKDIR /home/appuser -RUN chown appuser:appuser /home/appuser -USER appuser - -# Copy the requirements.txt file and install the requirements -COPY --chown=appuser:appuser requirements.txt . 
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \ - pip install --no-cache-dir --user -r requirements.txt - -# Copy the application files -COPY --chown=appuser:appuser autogpt/ ./autogpt - -# Set the entrypoint -ENTRYPOINT ["python", "-m", "autogpt"] diff --git a/spaces/kepl/gpt/client/css/sidebar.css b/spaces/kepl/gpt/client/css/sidebar.css deleted file mode 100644 index 310887c60443abd491c3162f62e44b5ec333e50d..0000000000000000000000000000000000000000 --- a/spaces/kepl/gpt/client/css/sidebar.css +++ /dev/null @@ -1,197 +0,0 @@ -.sidebar { - max-width: 260px; - padding: var(--section-gap); - flex-shrink: 0; - display: flex; - flex-direction: column; - justify-content: space-between; -} - -.sidebar .title { - font-size: 14px; - font-weight: 500; -} - -.sidebar .conversation-sidebar { - padding: 8px 12px; - display: flex; - gap: 18px; - align-items: center; - user-select: none; - justify-content: space-between; -} - -.sidebar .conversation-sidebar .left { - cursor: pointer; - display: flex; - align-items: center; - gap: 10px; -} - -.sidebar i { - color: var(--conversations); - cursor: pointer; -} - -.sidebar .top { - display: flex; - flex-direction: column; - overflow: hidden; - gap: 16px; - padding-right: 8px; -} - -.sidebar .top:hover { - overflow: auto; -} - -.sidebar .info { - padding: 8px 12px 0px 12px; - display: flex; - align-items: center; - justify-content: center; - user-select: none; - background: transparent; - width: 100%; - border: none; - text-decoration: none; -} - -.sidebar .info span { - color: var(--conversations); - line-height: 1.5; - font-size: 0.75rem; -} - -.sidebar .info i::before { - margin-right: 8px; -} - -.sidebar-footer { - width: 100%; - margin-top: 16px; - display: flex; - flex-direction: column; -} - -.sidebar-footer button { - cursor: pointer; - user-select: none; - background: transparent; -} - -.sidebar.shown { - position: fixed; - top: 0; - left: 0; - width: 100%; - height: 100%; - z-index: 1000; -} - -.sidebar.shown .box { - background-color: #16171a; - width: 80%; - height: 100%; - overflow-y: auto; -} - -@keyframes spinner { - to { - transform: rotate(360deg); - } -} - -/* scrollbar */ -.sidebar .top::-webkit-scrollbar { - width: 4px; - padding: 8px 0px; -} - -.sidebar .top::-webkit-scrollbar-track { - background-color: #ffffff00; -} - -.sidebar .top::-webkit-scrollbar-thumb { - background-color: #555555; - border-radius: 10px; -} - -.spinner:before { - content: ""; - box-sizing: border-box; - position: absolute; - top: 50%; - left: 45%; - width: 20px; - height: 20px; - border-radius: 50%; - border: 1px solid var(--conversations); - border-top-color: white; - animation: spinner 0.6s linear infinite; -} - -.menu-button { - display: none !important; - position: absolute; - z-index: 100000; - top: 0; - left: 0; - margin: 10px; - font-size: 1rem; - cursor: pointer; - width: 30px; - height: 30px; - justify-content: center; - align-items: center; - transition: 0.33s; -} - -.menu-button i { - transition: 0.33s; -} - -.rotated { - transform: rotate(360deg); -} - -.menu-button.rotated { - position: fixed; - top: 10px; - left: 10px; - z-index: 1001; -} - -@media screen and (max-width: 990px) { - .sidebar { - display: none; - width: 100%; - max-width: none; - } - - .menu-button { - display: flex !important; - } -} - -@media (max-width: 990px) { - .sidebar .top { - padding-top: 48px; - } -} - -@media (min-width: 768px) { - .sidebar.shown { - position: static; - width: auto; - height: auto; - background-color: 
transparent; - } - - .sidebar.shown .box { - background-color: #16171a; - width: auto; - height: auto; - overflow-y: auto; - } -} diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/speed.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/speed.py deleted file mode 100644 index 45e95237da65e44f35a172c25ac6dc4e313e4eae..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/speed.py +++ /dev/null @@ -1,23 +0,0 @@ -from easydict import EasyDict as edict - -# configs for test speed - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "synthetic" -config.num_classes = 100 * 10000 -config.num_epoch = 30 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = [] diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/facerender/modules/make_animation.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/facerender/modules/make_animation.py deleted file mode 100644 index 3360c53501a064f35d7db21a5361f89aa9658b42..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/facerender/modules/make_animation.py +++ /dev/null @@ -1,170 +0,0 @@ -from scipy.spatial import ConvexHull -import torch -import torch.nn.functional as F -import numpy as np -from tqdm import tqdm - -def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False, - use_relative_movement=False, use_relative_jacobian=False): - if adapt_movement_scale: - source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume - driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume - adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) - else: - adapt_movement_scale = 1 - - kp_new = {k: v for k, v in kp_driving.items()} - - if use_relative_movement: - kp_value_diff = (kp_driving['value'] - kp_driving_initial['value']) - kp_value_diff *= adapt_movement_scale - kp_new['value'] = kp_value_diff + kp_source['value'] - - if use_relative_jacobian: - jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) - kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian']) - - return kp_new - -def headpose_pred_to_degree(pred): - device = pred.device - idx_tensor = [idx for idx in range(66)] - idx_tensor = torch.FloatTensor(idx_tensor).type_as(pred).to(device) - pred = F.softmax(pred) - degree = torch.sum(pred*idx_tensor, 1) * 3 - 99 - return degree - -def get_rotation_matrix(yaw, pitch, roll): - yaw = yaw / 180 * 3.14 - pitch = pitch / 180 * 3.14 - roll = roll / 180 * 3.14 - - roll = roll.unsqueeze(1) - pitch = pitch.unsqueeze(1) - yaw = yaw.unsqueeze(1) - - pitch_mat = torch.cat([torch.ones_like(pitch), torch.zeros_like(pitch), torch.zeros_like(pitch), - torch.zeros_like(pitch), torch.cos(pitch), -torch.sin(pitch), - torch.zeros_like(pitch), torch.sin(pitch), torch.cos(pitch)], dim=1) - pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3) - - yaw_mat = torch.cat([torch.cos(yaw), torch.zeros_like(yaw), torch.sin(yaw), - torch.zeros_like(yaw), torch.ones_like(yaw), torch.zeros_like(yaw), - -torch.sin(yaw), torch.zeros_like(yaw), torch.cos(yaw)], dim=1) - yaw_mat = 
yaw_mat.view(yaw_mat.shape[0], 3, 3) - - roll_mat = torch.cat([torch.cos(roll), -torch.sin(roll), torch.zeros_like(roll), - torch.sin(roll), torch.cos(roll), torch.zeros_like(roll), - torch.zeros_like(roll), torch.zeros_like(roll), torch.ones_like(roll)], dim=1) - roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3) - - rot_mat = torch.einsum('bij,bjk,bkm->bim', pitch_mat, yaw_mat, roll_mat) - - return rot_mat - -def keypoint_transformation(kp_canonical, he, wo_exp=False): - kp = kp_canonical['value'] # (bs, k, 3) - yaw, pitch, roll= he['yaw'], he['pitch'], he['roll'] - yaw = headpose_pred_to_degree(yaw) - pitch = headpose_pred_to_degree(pitch) - roll = headpose_pred_to_degree(roll) - - if 'yaw_in' in he: - yaw = he['yaw_in'] - if 'pitch_in' in he: - pitch = he['pitch_in'] - if 'roll_in' in he: - roll = he['roll_in'] - - rot_mat = get_rotation_matrix(yaw, pitch, roll) # (bs, 3, 3) - - t, exp = he['t'], he['exp'] - if wo_exp: - exp = exp*0 - - # keypoint rotation - kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp) - - # keypoint translation - t[:, 0] = t[:, 0]*0 - t[:, 2] = t[:, 2]*0 - t = t.unsqueeze(1).repeat(1, kp.shape[1], 1) - kp_t = kp_rotated + t - - # add expression deviation - exp = exp.view(exp.shape[0], -1, 3) - kp_transformed = kp_t + exp - - return {'value': kp_transformed} - - - -def make_animation(source_image, source_semantics, target_semantics, - generator, kp_detector, he_estimator, mapping, - yaw_c_seq=None, pitch_c_seq=None, roll_c_seq=None, - use_exp=True, use_half=False): - with torch.no_grad(): - predictions = [] - - kp_canonical = kp_detector(source_image) - he_source = mapping(source_semantics) - kp_source = keypoint_transformation(kp_canonical, he_source) - - for frame_idx in tqdm(range(target_semantics.shape[1]), 'Face Renderer:'): - # still check the dimension - # print(target_semantics.shape, source_semantics.shape) - target_semantics_frame = target_semantics[:, frame_idx] - he_driving = mapping(target_semantics_frame) - if yaw_c_seq is not None: - he_driving['yaw_in'] = yaw_c_seq[:, frame_idx] - if pitch_c_seq is not None: - he_driving['pitch_in'] = pitch_c_seq[:, frame_idx] - if roll_c_seq is not None: - he_driving['roll_in'] = roll_c_seq[:, frame_idx] - - kp_driving = keypoint_transformation(kp_canonical, he_driving) - - kp_norm = kp_driving - out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm) - ''' - source_image_new = out['prediction'].squeeze(1) - kp_canonical_new = kp_detector(source_image_new) - he_source_new = he_estimator(source_image_new) - kp_source_new = keypoint_transformation(kp_canonical_new, he_source_new, wo_exp=True) - kp_driving_new = keypoint_transformation(kp_canonical_new, he_driving, wo_exp=True) - out = generator(source_image_new, kp_source=kp_source_new, kp_driving=kp_driving_new) - ''' - predictions.append(out['prediction']) - predictions_ts = torch.stack(predictions, dim=1) - return predictions_ts - -class AnimateModel(torch.nn.Module): - """ - Merge all generator related updates into single model for better multi-gpu usage - """ - - def __init__(self, generator, kp_extractor, mapping): - super(AnimateModel, self).__init__() - self.kp_extractor = kp_extractor - self.generator = generator - self.mapping = mapping - - self.kp_extractor.eval() - self.generator.eval() - self.mapping.eval() - - def forward(self, x): - - source_image = x['source_image'] - source_semantics = x['source_semantics'] - target_semantics = x['target_semantics'] - yaw_c_seq = x['yaw_c_seq'] - pitch_c_seq = x['pitch_c_seq'] - roll_c_seq = 
x['roll_c_seq'] - - predictions_video = make_animation(source_image, source_semantics, target_semantics, - self.generator, self.kp_extractor, - self.mapping, use_exp = True, - yaw_c_seq=yaw_c_seq, pitch_c_seq=pitch_c_seq, roll_c_seq=roll_c_seq) - - return predictions_video \ No newline at end of file diff --git a/spaces/kevinwang676/SadTalker/src/utils/preprocess.py b/spaces/kevinwang676/SadTalker/src/utils/preprocess.py deleted file mode 100644 index 0f784e6c3d8562e1db1bbd850b9f01843cee3c97..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/utils/preprocess.py +++ /dev/null @@ -1,170 +0,0 @@ -import numpy as np -import cv2, os, sys, torch -from tqdm import tqdm -from PIL import Image - -# 3dmm extraction -import safetensors -import safetensors.torch -from src.face3d.util.preprocess import align_img -from src.face3d.util.load_mats import load_lm3d -from src.face3d.models import networks - -from scipy.io import loadmat, savemat -from src.utils.croper import Preprocesser - - -import warnings - -from src.utils.safetensor_helper import load_x_from_safetensor -warnings.filterwarnings("ignore") - -def split_coeff(coeffs): - """ - Return: - coeffs_dict -- a dict of torch.tensors - - Parameters: - coeffs -- torch.tensor, size (B, 256) - """ - id_coeffs = coeffs[:, :80] - exp_coeffs = coeffs[:, 80: 144] - tex_coeffs = coeffs[:, 144: 224] - angles = coeffs[:, 224: 227] - gammas = coeffs[:, 227: 254] - translations = coeffs[:, 254:] - return { - 'id': id_coeffs, - 'exp': exp_coeffs, - 'tex': tex_coeffs, - 'angle': angles, - 'gamma': gammas, - 'trans': translations - } - - -class CropAndExtract(): - def __init__(self, sadtalker_path, device): - - self.propress = Preprocesser(device) - self.net_recon = networks.define_net_recon(net_recon='resnet50', use_last_fc=False, init_path='').to(device) - - if sadtalker_path['use_safetensor']: - checkpoint = safetensors.torch.load_file(sadtalker_path['checkpoint']) - self.net_recon.load_state_dict(load_x_from_safetensor(checkpoint, 'face_3drecon')) - else: - checkpoint = torch.load(sadtalker_path['path_of_net_recon_model'], map_location=torch.device(device)) - self.net_recon.load_state_dict(checkpoint['net_recon']) - - self.net_recon.eval() - self.lm3d_std = load_lm3d(sadtalker_path['dir_of_BFM_fitting']) - self.device = device - - def generate(self, input_path, save_dir, crop_or_resize='crop', source_image_flag=False, pic_size=256): - - pic_name = os.path.splitext(os.path.split(input_path)[-1])[0] - - landmarks_path = os.path.join(save_dir, pic_name+'_landmarks.txt') - coeff_path = os.path.join(save_dir, pic_name+'.mat') - png_path = os.path.join(save_dir, pic_name+'.png') - - #load input - if not os.path.isfile(input_path): - raise ValueError('input_path must be a valid path to video/image file') - elif input_path.split('.')[-1] in ['jpg', 'png', 'jpeg']: - # loader for first frame - full_frames = [cv2.imread(input_path)] - fps = 25 - else: - # loader for videos - video_stream = cv2.VideoCapture(input_path) - fps = video_stream.get(cv2.CAP_PROP_FPS) - full_frames = [] - while 1: - still_reading, frame = video_stream.read() - if not still_reading: - video_stream.release() - break - full_frames.append(frame) - if source_image_flag: - break - - x_full_frames= [cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) for frame in full_frames] - - #### crop images as the - if 'crop' in crop_or_resize.lower(): # default crop - x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, 
xsize=512) - clx, cly, crx, cry = crop - lx, ly, rx, ry = quad - lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry) - oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad) - elif 'full' in crop_or_resize.lower(): - x_full_frames, crop, quad = self.propress.crop(x_full_frames, still=True if 'ext' in crop_or_resize.lower() else False, xsize=512) - clx, cly, crx, cry = crop - lx, ly, rx, ry = quad - lx, ly, rx, ry = int(lx), int(ly), int(rx), int(ry) - oy1, oy2, ox1, ox2 = cly+ly, cly+ry, clx+lx, clx+rx - crop_info = ((ox2 - ox1, oy2 - oy1), crop, quad) - else: # resize mode - oy1, oy2, ox1, ox2 = 0, x_full_frames[0].shape[0], 0, x_full_frames[0].shape[1] - crop_info = ((ox2 - ox1, oy2 - oy1), None, None) - - frames_pil = [Image.fromarray(cv2.resize(frame,(pic_size, pic_size))) for frame in x_full_frames] - if len(frames_pil) == 0: - print('No face is detected in the input file') - return None, None - - # save crop info - for frame in frames_pil: - cv2.imwrite(png_path, cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)) - - # 2. get the landmark according to the detected face. - if not os.path.isfile(landmarks_path): - lm = self.propress.predictor.extract_keypoint(frames_pil, landmarks_path) - else: - print(' Using saved landmarks.') - lm = np.loadtxt(landmarks_path).astype(np.float32) - lm = lm.reshape([len(x_full_frames), -1, 2]) - - if not os.path.isfile(coeff_path): - # load 3dmm paramter generator from Deep3DFaceRecon_pytorch - video_coeffs, full_coeffs = [], [] - for idx in tqdm(range(len(frames_pil)), desc='3DMM Extraction In Video:'): - frame = frames_pil[idx] - W,H = frame.size - lm1 = lm[idx].reshape([-1, 2]) - - if np.mean(lm1) == -1: - lm1 = (self.lm3d_std[:, :2]+1)/2. - lm1 = np.concatenate( - [lm1[:, :1]*W, lm1[:, 1:2]*H], 1 - ) - else: - lm1[:, -1] = H - 1 - lm1[:, -1] - - trans_params, im1, lm1, _ = align_img(frame, lm1, self.lm3d_std) - - trans_params = np.array([float(item) for item in np.hsplit(trans_params, 5)]).astype(np.float32) - im_t = torch.tensor(np.array(im1)/255., dtype=torch.float32).permute(2, 0, 1).to(self.device).unsqueeze(0) - - with torch.no_grad(): - full_coeff = self.net_recon(im_t) - coeffs = split_coeff(full_coeff) - - pred_coeff = {key:coeffs[key].cpu().numpy() for key in coeffs} - - pred_coeff = np.concatenate([ - pred_coeff['exp'], - pred_coeff['angle'], - pred_coeff['trans'], - trans_params[2:][None], - ], 1) - video_coeffs.append(pred_coeff) - full_coeffs.append(full_coeff.cpu().numpy()) - - semantic_npy = np.array(video_coeffs)[:,0] - - savemat(coeff_path, {'coeff_3dmm': semantic_npy, 'full_3dmm': np.array(full_coeffs)[0]}) - - return coeff_path, png_path, crop_info diff --git a/spaces/kevinwang676/test-1/infer_pack/attentions.py b/spaces/kevinwang676/test-1/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/test-1/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - 
self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - 
self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." 
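# Local attention: the lines below build a banded mask that keeps only keys within
# block_length of each query position; scores outside the band are pushed to -1e4 before the softmax.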
- block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/kevinwang676/voice-conversion-yourtts/bark/clonevoice.py b/spaces/kevinwang676/voice-conversion-yourtts/bark/clonevoice.py deleted file mode 100644 index 1ac4610806c2b79d5ab22567064e73c41b3c01fa..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/voice-conversion-yourtts/bark/clonevoice.py +++ /dev/null @@ -1,41 +0,0 @@ -from bark.generation import load_codec_model, generate_text_semantic, grab_best_device -from encodec.utils import convert_audio -import torchaudio -import torch -import os -import gradio - - -def clone_voice(audio_filepath, text, dest_filename, progress=gradio.Progress(track_tqdm=True)): - if len(text) < 1: - raise gradio.Error('No transcription text entered!') - - use_gpu = not os.environ.get("BARK_FORCE_CPU", False) - progress(0, desc="Loading Codec") - model = load_codec_model(use_gpu=use_gpu) - progress(0.25, desc="Converting WAV") - - # Load and pre-process the audio waveform - device = grab_best_device(use_gpu) - wav, sr = torchaudio.load(audio_filepath) - wav = convert_audio(wav, sr, model.sample_rate, model.channels) - wav = wav.unsqueeze(0).to(device) - progress(0.5, desc="Extracting codes") - - # Extract discrete codes from EnCodec - with torch.no_grad(): - encoded_frames = model.encode(wav) - codes = torch.cat([encoded[0] for encoded in encoded_frames], dim=-1).squeeze() # [n_q, T] - - # get seconds of audio - seconds = wav.shape[-1] / model.sample_rate - # generate semantic tokens - semantic_tokens = generate_text_semantic(text, max_gen_duration_s=seconds, top_k=50, top_p=.95, temp=0.7) - - # move codes to cpu - codes = codes.cpu().numpy() - - import numpy as np - output_path = dest_filename + '.npz' - np.savez(output_path, fine_prompt=codes, coarse_prompt=codes[:2, :], semantic_prompt=semantic_tokens) - return "Finished" diff --git 
a/spaces/kevinwang676/voice-conversion-yourtts/bark/settings.py b/spaces/kevinwang676/voice-conversion-yourtts/bark/settings.py deleted file mode 100644 index fa0658c44a1d3380ed09d3e87aa21d972a4d40a3..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/voice-conversion-yourtts/bark/settings.py +++ /dev/null @@ -1,5 +0,0 @@ -import os - -def initenv(args): - os.environ['SUNO_USE_SMALL_MODELS'] = str("-smallmodels" in args) - os.environ['BARK_FORCE_CPU'] = str("-forcecpu" in args) diff --git a/spaces/kira4424/Tacotron-zero-short-voice-clone/CODE_OF_CONDUCT.md b/spaces/kira4424/Tacotron-zero-short-voice-clone/CODE_OF_CONDUCT.md deleted file mode 100644 index 77b56791b7e900eb0a7d8a258bf78ecb449c8468..0000000000000000000000000000000000000000 --- a/spaces/kira4424/Tacotron-zero-short-voice-clone/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,130 +0,0 @@ -# Contributor Covenant Code of Conduct -## First of all -Don't be evil, never - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -babysor00@gmail.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. 
diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/constants.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/constants.py deleted file mode 100644 index ae3e5e151342232be8e2c2a77fe6fd5798dc2a8c..0000000000000000000000000000000000000000 --- a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/constants.py +++ /dev/null @@ -1,152 +0,0 @@ -weights = {"ade20k": - [6.34517766497462, - 9.328358208955224, - 11.389521640091116, - 16.10305958132045, - 20.833333333333332, - 22.22222222222222, - 25.125628140703515, - 43.29004329004329, - 50.5050505050505, - 54.6448087431694, - 55.24861878453038, - 60.24096385542168, - 62.5, - 66.2251655629139, - 84.74576271186442, - 90.90909090909092, - 91.74311926605505, - 96.15384615384616, - 96.15384615384616, - 97.08737864077669, - 102.04081632653062, - 135.13513513513513, - 149.2537313432836, - 153.84615384615384, - 163.93442622950818, - 166.66666666666666, - 188.67924528301887, - 192.30769230769232, - 217.3913043478261, - 227.27272727272725, - 227.27272727272725, - 227.27272727272725, - 303.03030303030306, - 322.5806451612903, - 333.3333333333333, - 370.3703703703703, - 384.61538461538464, - 416.6666666666667, - 416.6666666666667, - 434.7826086956522, - 434.7826086956522, - 454.5454545454545, - 454.5454545454545, - 500.0, - 526.3157894736842, - 526.3157894736842, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 555.5555555555555, - 588.2352941176471, - 588.2352941176471, - 588.2352941176471, - 588.2352941176471, - 588.2352941176471, - 666.6666666666666, - 666.6666666666666, - 666.6666666666666, - 666.6666666666666, - 714.2857142857143, - 714.2857142857143, - 714.2857142857143, - 714.2857142857143, - 714.2857142857143, - 769.2307692307693, - 769.2307692307693, - 769.2307692307693, - 833.3333333333334, - 833.3333333333334, - 833.3333333333334, - 833.3333333333334, - 909.090909090909, - 1000.0, - 1111.111111111111, - 1111.111111111111, - 1111.111111111111, - 1111.111111111111, - 1111.111111111111, - 1250.0, - 1250.0, - 1250.0, - 1250.0, - 1250.0, - 1428.5714285714287, - 1428.5714285714287, - 1428.5714285714287, - 1428.5714285714287, - 1428.5714285714287, - 1428.5714285714287, - 1428.5714285714287, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 1666.6666666666667, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2000.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 2500.0, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 3333.3333333333335, - 5000.0, - 5000.0, - 5000.0] -} \ No newline at end of file diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio_client/serializing.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio_client/serializing.py deleted file mode 100644 index 7b8626c8753cb323dff7938219dfeeff01208dfa..0000000000000000000000000000000000000000 
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio_client/serializing.py +++ /dev/null @@ -1,552 +0,0 @@ -from __future__ import annotations - -import json -import os -import uuid -from pathlib import Path -from typing import Any - -from gradio_client import media_data, utils -from gradio_client.data_classes import FileData - -with open(Path(__file__).parent / "types.json") as f: - serializer_types = json.load(f) - - -class Serializable: - def serialized_info(self): - """ - The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description]. - Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output - """ - return self.api_info() - - def api_info(self) -> dict[str, list[str]]: - """ - The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description]. - Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output - """ - raise NotImplementedError() - - def example_inputs(self) -> dict[str, Any]: - """ - The example inputs for this component as a dictionary whose values are example inputs compatible with this component. - Keys of the dictionary are: raw, serialized - """ - raise NotImplementedError() - - # For backwards compatibility - def input_api_info(self) -> tuple[str, str]: - api_info = self.api_info() - return (api_info["serialized_input"][0], api_info["serialized_input"][1]) - - # For backwards compatibility - def output_api_info(self) -> tuple[str, str]: - api_info = self.api_info() - return (api_info["serialized_output"][0], api_info["serialized_output"][1]) - - def serialize(self, x: Any, load_dir: str | Path = ""): - """ - Convert data from human-readable format to serialized format for a browser. - """ - return x - - def deserialize( - self, - x: Any, - save_dir: str | Path | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ): - """ - Convert data from serialized format for a browser to human-readable format. 
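        File-handling subclasses (e.g. ImgSerializable and FileSerializable below) override this
        to turn base64 payloads or temp-file references back into local file paths.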
- """ - return x - - -class SimpleSerializable(Serializable): - """General class that does not perform any serialization or deserialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["SimpleSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": None, - "serialized": None, - } - - -class StringSerializable(Serializable): - """Expects a string as input/output but performs no serialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["StringSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": "Howdy!", - "serialized": "Howdy!", - } - - -class ListStringSerializable(Serializable): - """Expects a list of strings as input/output but performs no serialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["ListStringSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": ["Howdy!", "Merhaba"], - "serialized": ["Howdy!", "Merhaba"], - } - - -class BooleanSerializable(Serializable): - """Expects a boolean as input/output but performs no serialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["BooleanSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": True, - "serialized": True, - } - - -class NumberSerializable(Serializable): - """Expects a number (int/float) as input/output but performs no serialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["NumberSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": 5, - "serialized": 5, - } - - -class ImgSerializable(Serializable): - """Expects a base64 string as input/output which is serialized to a filepath.""" - - def serialized_info(self): - return {"type": "string", "description": "filepath or URL to image"} - - def api_info(self) -> dict[str, bool | dict]: - return {"info": serializer_types["ImgSerializable"], "serialized_info": True} - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": media_data.BASE64_IMAGE, - "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png", - } - - def serialize( - self, - x: str | None, - load_dir: str | Path = "", - ) -> str | None: - """ - Convert from human-friendly version of a file (string filepath) to a serialized - representation (base64). - Parameters: - x: String path to file to serialize - load_dir: Path to directory containing x - """ - if x is None or x == "": - return None - is_url = utils.is_valid_url(x) - path = x if is_url else Path(load_dir) / x - return utils.encode_url_or_file_to_base64(path) - - def deserialize( - self, - x: str | None, - save_dir: str | Path | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | None: - """ - Convert from serialized representation of a file (base64) to a human-friendly - version (string filepath). 
Optionally, save the file to the directory specified by save_dir - Parameters: - x: Base64 representation of image to deserialize into a string filepath - save_dir: Path to directory to save the deserialized image to - root_url: Ignored - hf_token: Ignored - """ - if x is None or x == "": - return None - file = utils.decode_base64_to_file(x, dir=save_dir) - return file.name - - -class FileSerializable(Serializable): - """Expects a dict with base64 representation of object as input/output which is serialized to a filepath.""" - - def serialized_info(self): - return self._single_file_serialized_info() - - def _single_file_api_info(self): - return { - "info": serializer_types["SingleFileSerializable"], - "serialized_info": True, - } - - def _single_file_serialized_info(self): - return {"type": "string", "description": "filepath or URL to file"} - - def _multiple_file_serialized_info(self): - return { - "type": "array", - "description": "List of filepath(s) or URL(s) to files", - "items": {"type": "string", "description": "filepath or URL to file"}, - } - - def _multiple_file_api_info(self): - return { - "info": serializer_types["MultipleFileSerializable"], - "serialized_info": True, - } - - def api_info(self) -> dict[str, dict | bool]: - return self._single_file_api_info() - - def example_inputs(self) -> dict[str, Any]: - return self._single_file_example_inputs() - - def _single_file_example_inputs(self) -> dict[str, Any]: - return { - "raw": {"is_file": False, "data": media_data.BASE64_FILE}, - "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf", - } - - def _multiple_file_example_inputs(self) -> dict[str, Any]: - return { - "raw": [{"is_file": False, "data": media_data.BASE64_FILE}], - "serialized": [ - "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf" - ], - } - - def _serialize_single( - self, x: str | FileData | None, load_dir: str | Path = "" - ) -> FileData | None: - if x is None or isinstance(x, dict): - return x - if utils.is_valid_url(x): - filename = x - size = None - else: - filename = str(Path(load_dir) / x) - size = Path(filename).stat().st_size - return { - "name": filename, - "data": utils.encode_url_or_file_to_base64(filename), - "orig_name": Path(filename).name, - "is_file": False, - "size": size, - } - - def _deserialize_single( - self, - x: str | FileData | None, - save_dir: str | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | None: - if x is None: - return None - if isinstance(x, str): - file_name = utils.decode_base64_to_file(x, dir=save_dir).name - elif isinstance(x, dict): - if x.get("is_file"): - filepath = x.get("name") - assert filepath is not None, f"The 'name' field is missing in {x}" - if root_url is not None: - file_name = utils.download_tmp_copy_of_file( - root_url + "file=" + filepath, - hf_token=hf_token, - dir=save_dir, - ).name - else: - file_name = utils.create_tmp_copy_of_file( - filepath, dir=save_dir - ).name - else: - data = x.get("data") - assert data is not None, f"The 'data' field is missing in {x}" - file_name = utils.decode_base64_to_file(data, dir=save_dir).name - else: - raise ValueError( - f"A FileSerializable component can only deserialize a string or a dict, not a {type(x)}: {x}" - ) - return file_name - - def serialize( - self, - x: str | FileData | None | list[str | FileData | None], - load_dir: str | Path = "", - ) -> FileData | None | list[FileData | None]: - """ - Convert from human-friendly version of a file (string filepath) 
to a - serialized representation (base64) - Parameters: - x: String path to file to serialize - load_dir: Path to directory containing x - """ - if x is None or x == "": - return None - if isinstance(x, list): - return [self._serialize_single(f, load_dir=load_dir) for f in x] - else: - return self._serialize_single(x, load_dir=load_dir) - - def deserialize( - self, - x: str | FileData | None | list[str | FileData | None], - save_dir: Path | str | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | None | list[str | None]: - """ - Convert from serialized representation of a file (base64) to a human-friendly - version (string filepath). Optionally, save the file to the directory specified by `save_dir` - Parameters: - x: Base64 representation of file to deserialize into a string filepath - save_dir: Path to directory to save the deserialized file to - root_url: If this component is loaded from an external Space, this is the URL of the Space. - hf_token: If this component is loaded from an external private Space, this is the access token for the Space - """ - if x is None: - return None - if isinstance(save_dir, Path): - save_dir = str(save_dir) - if isinstance(x, list): - return [ - self._deserialize_single( - f, save_dir=save_dir, root_url=root_url, hf_token=hf_token - ) - for f in x - ] - else: - return self._deserialize_single( - x, save_dir=save_dir, root_url=root_url, hf_token=hf_token - ) - - -class VideoSerializable(FileSerializable): - def serialized_info(self): - return {"type": "string", "description": "filepath or URL to video file"} - - def api_info(self) -> dict[str, dict | bool]: - return {"info": serializer_types["FileSerializable"], "serialized_info": True} - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": {"is_file": False, "data": media_data.BASE64_VIDEO}, - "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/video_sample.mp4", - } - - def serialize( - self, x: str | None, load_dir: str | Path = "" - ) -> tuple[FileData | None, None]: - return (super().serialize(x, load_dir), None) # type: ignore - - def deserialize( - self, - x: tuple[FileData | None, FileData | None] | None, - save_dir: Path | str | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | tuple[str | None, str | None] | None: - """ - Convert from serialized representation of a file (base64) to a human-friendly - version (string filepath). Optionally, save the file to the directory specified by `save_dir` - """ - if isinstance(x, (tuple, list)): - assert len(x) == 2, f"Expected tuple of length 2. Received: {x}" - x_as_list = [x[0], x[1]] - else: - raise ValueError(f"Expected tuple of length 2. 
Received: {x}") - deserialized_file = super().deserialize(x_as_list, save_dir, root_url, hf_token) # type: ignore - if isinstance(deserialized_file, list): - return deserialized_file[0] # ignore subtitles - - -class JSONSerializable(Serializable): - def serialized_info(self): - return {"type": "string", "description": "filepath to JSON file"} - - def api_info(self) -> dict[str, dict | bool]: - return {"info": serializer_types["JSONSerializable"], "serialized_info": True} - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": {"a": 1, "b": 2}, - "serialized": None, - } - - def serialize( - self, - x: str | None, - load_dir: str | Path = "", - ) -> dict | list | None: - """ - Convert from a a human-friendly version (string path to json file) to a - serialized representation (json string) - Parameters: - x: String path to json file to read to get json string - load_dir: Path to directory containing x - """ - if x is None or x == "": - return None - return utils.file_to_json(Path(load_dir) / x) - - def deserialize( - self, - x: str | dict | list, - save_dir: str | Path | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | None: - """ - Convert from serialized representation (json string) to a human-friendly - version (string path to json file). Optionally, save the file to the directory specified by `save_dir` - Parameters: - x: Json string - save_dir: Path to save the deserialized json file to - root_url: Ignored - hf_token: Ignored - """ - if x is None: - return None - return utils.dict_or_str_to_json_file(x, dir=save_dir).name - - -class GallerySerializable(Serializable): - def serialized_info(self): - return { - "type": "string", - "description": "path to directory with images and a file associating images with captions called captions.json", - } - - def api_info(self) -> dict[str, dict | bool]: - return { - "info": serializer_types["GallerySerializable"], - "serialized_info": True, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": [media_data.BASE64_IMAGE] * 2, - "serialized": [ - "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png", - ] - * 2, - } - - def serialize( - self, x: str | None, load_dir: str | Path = "" - ) -> list[list[str | None]] | None: - if x is None or x == "": - return None - files = [] - captions_file = Path(x) / "captions.json" - with captions_file.open("r") as captions_json: - captions = json.load(captions_json) - for file_name, caption in captions.items(): - img = FileSerializable().serialize(file_name) - files.append([img, caption]) - return files - - def deserialize( - self, - x: list[list[str | None]] | None, - save_dir: str = "", - root_url: str | None = None, - hf_token: str | None = None, - ) -> None | str: - if x is None: - return None - gallery_path = Path(save_dir) / str(uuid.uuid4()) - gallery_path.mkdir(exist_ok=True, parents=True) - captions = {} - for img_data in x: - if isinstance(img_data, (list, tuple)): - img_data, caption = img_data - else: - caption = None - name = FileSerializable().deserialize( - img_data, gallery_path, root_url=root_url, hf_token=hf_token - ) - captions[name] = caption - captions_file = gallery_path / "captions.json" - with captions_file.open("w") as captions_json: - json.dump(captions, captions_json) - return os.path.abspath(gallery_path) - - -SERIALIZER_MAPPING = {} -for cls in Serializable.__subclasses__(): - SERIALIZER_MAPPING[cls.__name__] = cls - for subcls in cls.__subclasses__(): - SERIALIZER_MAPPING[subcls.__name__] = 
subcls - -SERIALIZER_MAPPING["Serializable"] = SimpleSerializable -SERIALIZER_MAPPING["File"] = FileSerializable -SERIALIZER_MAPPING["UploadButton"] = FileSerializable - -COMPONENT_MAPPING: dict[str, type] = { - "textbox": StringSerializable, - "number": NumberSerializable, - "slider": NumberSerializable, - "checkbox": BooleanSerializable, - "checkboxgroup": ListStringSerializable, - "radio": StringSerializable, - "dropdown": SimpleSerializable, - "image": ImgSerializable, - "video": FileSerializable, - "audio": FileSerializable, - "file": FileSerializable, - "dataframe": JSONSerializable, - "timeseries": JSONSerializable, - "state": SimpleSerializable, - "button": StringSerializable, - "uploadbutton": FileSerializable, - "colorpicker": StringSerializable, - "label": JSONSerializable, - "highlightedtext": JSONSerializable, - "json": JSONSerializable, - "html": StringSerializable, - "gallery": GallerySerializable, - "chatbot": JSONSerializable, - "model3d": FileSerializable, - "plot": JSONSerializable, - "barplot": JSONSerializable, - "lineplot": JSONSerializable, - "scatterplot": JSONSerializable, - "markdown": StringSerializable, - "dataset": StringSerializable, - "code": StringSerializable, - "interpretation": SimpleSerializable, - "annotatedimage": JSONSerializable, -} diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/mlab.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/mlab.py deleted file mode 100644 index 059cf0f1624b6f0bf1144c387210144116d76b16..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/mlab.py +++ /dev/null @@ -1,987 +0,0 @@ -""" -Numerical Python functions written for compatibility with MATLAB -commands with the same names. Most numerical Python functions can be found in -the `NumPy`_ and `SciPy`_ libraries. What remains here is code for performing -spectral computations and kernel density estimations. - -.. _NumPy: https://numpy.org -.. _SciPy: https://www.scipy.org - -Spectral functions ------------------- - -`cohere` - Coherence (normalized cross spectral density) - -`csd` - Cross spectral density using Welch's average periodogram - -`detrend` - Remove the mean or best fit line from an array - -`psd` - Power spectral density using Welch's average periodogram - -`specgram` - Spectrogram (spectrum over segments of time) - -`complex_spectrum` - Return the complex-valued frequency spectrum of a signal - -`magnitude_spectrum` - Return the magnitude of the frequency spectrum of a signal - -`angle_spectrum` - Return the angle (wrapped phase) of the frequency spectrum of a signal - -`phase_spectrum` - Return the phase (unwrapped angle) of the frequency spectrum of a signal - -`detrend_mean` - Remove the mean from a line. - -`detrend_linear` - Remove the best fit line from a line. - -`detrend_none` - Return the original line. - -`stride_windows` - Get all windows in an array in a memory-efficient manner -""" - -import functools -from numbers import Number - -import numpy as np - -from matplotlib import _api, _docstring, cbook - - -def window_hanning(x): - """ - Return *x* times the Hanning (or Hann) window of len(*x*). - - See Also - -------- - window_none : Another window algorithm. - """ - return np.hanning(len(x))*x - - -def window_none(x): - """ - No window function; simply return *x*. - - See Also - -------- - window_hanning : Another window algorithm. 
- """ - return x - - -def detrend(x, key=None, axis=None): - """ - Return *x* with its trend removed. - - Parameters - ---------- - x : array or sequence - Array or sequence containing the data. - - key : {'default', 'constant', 'mean', 'linear', 'none'} or function - The detrending algorithm to use. 'default', 'mean', and 'constant' are - the same as `detrend_mean`. 'linear' is the same as `detrend_linear`. - 'none' is the same as `detrend_none`. The default is 'mean'. See the - corresponding functions for more details regarding the algorithms. Can - also be a function that carries out the detrend operation. - - axis : int - The axis along which to do the detrending. - - See Also - -------- - detrend_mean : Implementation of the 'mean' algorithm. - detrend_linear : Implementation of the 'linear' algorithm. - detrend_none : Implementation of the 'none' algorithm. - """ - if key is None or key in ['constant', 'mean', 'default']: - return detrend(x, key=detrend_mean, axis=axis) - elif key == 'linear': - return detrend(x, key=detrend_linear, axis=axis) - elif key == 'none': - return detrend(x, key=detrend_none, axis=axis) - elif callable(key): - x = np.asarray(x) - if axis is not None and axis + 1 > x.ndim: - raise ValueError(f'axis(={axis}) out of bounds') - if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1): - return key(x) - # try to use the 'axis' argument if the function supports it, - # otherwise use apply_along_axis to do it - try: - return key(x, axis=axis) - except TypeError: - return np.apply_along_axis(key, axis=axis, arr=x) - else: - raise ValueError( - f"Unknown value for key: {key!r}, must be one of: 'default', " - f"'constant', 'mean', 'linear', or a function") - - -def detrend_mean(x, axis=None): - """ - Return *x* minus the mean(*x*). - - Parameters - ---------- - x : array or sequence - Array or sequence containing the data - Can have any dimensionality - - axis : int - The axis along which to take the mean. See `numpy.mean` for a - description of this argument. - - See Also - -------- - detrend_linear : Another detrend algorithm. - detrend_none : Another detrend algorithm. - detrend : A wrapper around all the detrend algorithms. - """ - x = np.asarray(x) - - if axis is not None and axis+1 > x.ndim: - raise ValueError('axis(=%s) out of bounds' % axis) - - return x - x.mean(axis, keepdims=True) - - -def detrend_none(x, axis=None): - """ - Return *x*: no detrending. - - Parameters - ---------- - x : any object - An object containing the data - - axis : int - This parameter is ignored. - It is included for compatibility with detrend_mean - - See Also - -------- - detrend_mean : Another detrend algorithm. - detrend_linear : Another detrend algorithm. - detrend : A wrapper around all the detrend algorithms. - """ - return x - - -def detrend_linear(y): - """ - Return *x* minus best fit line; 'linear' detrending. - - Parameters - ---------- - y : 0-D or 1-D array or sequence - Array or sequence containing the data - - See Also - -------- - detrend_mean : Another detrend algorithm. - detrend_none : Another detrend algorithm. - detrend : A wrapper around all the detrend algorithms. - """ - # This is faster than an algorithm based on linalg.lstsq. - y = np.asarray(y) - - if y.ndim > 1: - raise ValueError('y cannot have ndim > 1') - - # short-circuit 0-D array. 
- if not y.ndim: - return np.array(0., dtype=y.dtype) - - x = np.arange(y.size, dtype=float) - - C = np.cov(x, y, bias=1) - b = C[0, 1]/C[0, 0] - - a = y.mean() - b*x.mean() - return y - (b*x + a) - - -@_api.deprecated("3.6") -def stride_windows(x, n, noverlap=None, axis=0): - """ - Get all windows of *x* with length *n* as a single array, - using strides to avoid data duplication. - - .. warning:: - - It is not safe to write to the output array. Multiple - elements may point to the same piece of memory, - so modifying one value may change others. - - Parameters - ---------- - x : 1D array or sequence - Array or sequence containing the data. - n : int - The number of data points in each window. - noverlap : int, default: 0 (no overlap) - The overlap between adjacent windows. - axis : int - The axis along which the windows will run. - - References - ---------- - `stackoverflow: Rolling window for 1D arrays in Numpy? - `_ - `stackoverflow: Using strides for an efficient moving average filter - `_ - """ - if noverlap is None: - noverlap = 0 - if np.ndim(x) != 1: - raise ValueError('only 1-dimensional arrays can be used') - return _stride_windows(x, n, noverlap, axis) - - -def _stride_windows(x, n, noverlap=0, axis=0): - # np>=1.20 provides sliding_window_view, and we only ever use axis=0. - if hasattr(np.lib.stride_tricks, "sliding_window_view") and axis == 0: - if noverlap >= n: - raise ValueError('noverlap must be less than n') - return np.lib.stride_tricks.sliding_window_view( - x, n, axis=0)[::n - noverlap].T - - if noverlap >= n: - raise ValueError('noverlap must be less than n') - if n < 1: - raise ValueError('n cannot be less than 1') - - x = np.asarray(x) - - if n == 1 and noverlap == 0: - if axis == 0: - return x[np.newaxis] - else: - return x[np.newaxis].T - if n > x.size: - raise ValueError('n cannot be greater than the length of x') - - # np.lib.stride_tricks.as_strided easily leads to memory corruption for - # non integer shape and strides, i.e. noverlap or n. See #3845. - noverlap = int(noverlap) - n = int(n) - - step = n - noverlap - if axis == 0: - shape = (n, (x.shape[-1]-noverlap)//step) - strides = (x.strides[0], step*x.strides[0]) - else: - shape = ((x.shape[-1]-noverlap)//step, n) - strides = (step*x.strides[0], x.strides[0]) - return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides) - - -def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None, - window=None, noverlap=None, pad_to=None, - sides=None, scale_by_freq=None, mode=None): - """ - Private helper implementing the common parts between the psd, csd, - spectrogram and complex, magnitude, angle, and phase spectrums. - """ - if y is None: - # if y is None use x for y - same_data = True - else: - # The checks for if y is x are so that we can use the same function to - # implement the core of psd(), csd(), and spectrogram() without doing - # extra calculations. We return the unaveraged Pxy, freqs, and t. - same_data = y is x - - if Fs is None: - Fs = 2 - if noverlap is None: - noverlap = 0 - if detrend_func is None: - detrend_func = detrend_none - if window is None: - window = window_hanning - - # if NFFT is set to None use the whole signal - if NFFT is None: - NFFT = 256 - - if mode is None or mode == 'default': - mode = 'psd' - _api.check_in_list( - ['default', 'psd', 'complex', 'magnitude', 'angle', 'phase'], - mode=mode) - - if not same_data and mode != 'psd': - raise ValueError("x and y must be equal if mode is not 'psd'") - - # Make sure we're dealing with a numpy array. 
If y and x were the same - # object to start with, keep them that way - x = np.asarray(x) - if not same_data: - y = np.asarray(y) - - if sides is None or sides == 'default': - if np.iscomplexobj(x): - sides = 'twosided' - else: - sides = 'onesided' - _api.check_in_list(['default', 'onesided', 'twosided'], sides=sides) - - # zero pad x and y up to NFFT if they are shorter than NFFT - if len(x) < NFFT: - n = len(x) - x = np.resize(x, NFFT) - x[n:] = 0 - - if not same_data and len(y) < NFFT: - n = len(y) - y = np.resize(y, NFFT) - y[n:] = 0 - - if pad_to is None: - pad_to = NFFT - - if mode != 'psd': - scale_by_freq = False - elif scale_by_freq is None: - scale_by_freq = True - - # For real x, ignore the negative frequencies unless told otherwise - if sides == 'twosided': - numFreqs = pad_to - if pad_to % 2: - freqcenter = (pad_to - 1)//2 + 1 - else: - freqcenter = pad_to//2 - scaling_factor = 1. - elif sides == 'onesided': - if pad_to % 2: - numFreqs = (pad_to + 1)//2 - else: - numFreqs = pad_to//2 + 1 - scaling_factor = 2. - - if not np.iterable(window): - window = window(np.ones(NFFT, x.dtype)) - if len(window) != NFFT: - raise ValueError( - "The window length must match the data's first dimension") - - result = _stride_windows(x, NFFT, noverlap) - result = detrend(result, detrend_func, axis=0) - result = result * window.reshape((-1, 1)) - result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :] - freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs] - - if not same_data: - # if same_data is False, mode must be 'psd' - resultY = _stride_windows(y, NFFT, noverlap) - resultY = detrend(resultY, detrend_func, axis=0) - resultY = resultY * window.reshape((-1, 1)) - resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :] - result = np.conj(result) * resultY - elif mode == 'psd': - result = np.conj(result) * result - elif mode == 'magnitude': - result = np.abs(result) / window.sum() - elif mode == 'angle' or mode == 'phase': - # we unwrap the phase later to handle the onesided vs. twosided case - result = np.angle(result) - elif mode == 'complex': - result /= window.sum() - - if mode == 'psd': - - # Also include scaling factors for one-sided densities and dividing by - # the sampling frequency, if desired. Scale everything, except the DC - # component and the NFFT/2 component: - - # if we have a even number of frequencies, don't scale NFFT/2 - if not NFFT % 2: - slc = slice(1, -1, None) - # if we have an odd number, just don't scale DC - else: - slc = slice(1, None, None) - - result[slc] *= scaling_factor - - # MATLAB divides by the sampling frequency so that density function - # has units of dB/Hz and can be integrated by the plotted frequency - # values. Perform the same scaling here. - if scale_by_freq: - result /= Fs - # Scale the spectrum by the norm of the window to compensate for - # windowing loss; see Bendat & Piersol Sec 11.5.2. - result /= (window**2).sum() - else: - # In this case, preserve power in the segment, not amplitude - result /= window.sum()**2 - - t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs - - if sides == 'twosided': - # center the frequency range at zero - freqs = np.roll(freqs, -freqcenter, axis=0) - result = np.roll(result, -freqcenter, axis=0) - elif not pad_to % 2: - # get the last value correctly, it is negative otherwise - freqs[-1] *= -1 - - # we unwrap the phase here to handle the onesided vs. 
twosided case - if mode == 'phase': - result = np.unwrap(result, axis=0) - - return result, freqs, t - - -def _single_spectrum_helper( - mode, x, Fs=None, window=None, pad_to=None, sides=None): - """ - Private helper implementing the commonality between the complex, magnitude, - angle, and phase spectrums. - """ - _api.check_in_list(['complex', 'magnitude', 'angle', 'phase'], mode=mode) - - if pad_to is None: - pad_to = len(x) - - spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs, - detrend_func=detrend_none, window=window, - noverlap=0, pad_to=pad_to, - sides=sides, - scale_by_freq=False, - mode=mode) - if mode != 'complex': - spec = spec.real - - if spec.ndim == 2 and spec.shape[1] == 1: - spec = spec[:, 0] - - return spec, freqs - - -# Split out these keyword docs so that they can be used elsewhere -_docstring.interpd.update( - Spectral="""\ -Fs : float, default: 2 - The sampling frequency (samples per time unit). It is used to calculate - the Fourier frequencies, *freqs*, in cycles per time unit. - -window : callable or ndarray, default: `.window_hanning` - A function or a vector of length *NFFT*. To create window vectors see - `.window_hanning`, `.window_none`, `numpy.blackman`, `numpy.hamming`, - `numpy.bartlett`, `scipy.signal`, `scipy.signal.get_window`, etc. If a - function is passed as the argument, it must take a data segment as an - argument and return the windowed version of the segment. - -sides : {'default', 'onesided', 'twosided'}, optional - Which sides of the spectrum to return. 'default' is one-sided for real - data and two-sided for complex data. 'onesided' forces the return of a - one-sided spectrum, while 'twosided' forces two-sided.""", - - Single_Spectrum="""\ -pad_to : int, optional - The number of points to which the data segment is padded when performing - the FFT. While not increasing the actual resolution of the spectrum (the - minimum distance between resolvable peaks), this can give more points in - the plot, allowing for more detail. This corresponds to the *n* parameter - in the call to `~numpy.fft.fft`. The default is None, which sets *pad_to* - equal to the length of the input signal (i.e. no padding).""", - - PSD="""\ -pad_to : int, optional - The number of points to which the data segment is padded when performing - the FFT. This can be different from *NFFT*, which specifies the number - of data points used. While not increasing the actual resolution of the - spectrum (the minimum distance between resolvable peaks), this can give - more points in the plot, allowing for more detail. This corresponds to - the *n* parameter in the call to `~numpy.fft.fft`. The default is None, - which sets *pad_to* equal to *NFFT* - -NFFT : int, default: 256 - The number of data points used in each block for the FFT. A power 2 is - most efficient. This should *NOT* be used to get zero padding, or the - scaling of the result will be incorrect; use *pad_to* for this instead. - -detrend : {'none', 'mean', 'linear'} or callable, default: 'none' - The function applied to each segment before fft-ing, designed to remove - the mean or linear trend. Unlike in MATLAB, where the *detrend* parameter - is a vector, in Matplotlib it is a function. The :mod:`~matplotlib.mlab` - module defines `.detrend_none`, `.detrend_mean`, and `.detrend_linear`, - but you can use a custom function as well. You can also use a string to - choose one of the functions: 'none' calls `.detrend_none`. 'mean' calls - `.detrend_mean`. 'linear' calls `.detrend_linear`. 
- -scale_by_freq : bool, default: True - Whether the resulting density values should be scaled by the scaling - frequency, which gives density in units of 1/Hz. This allows for - integration over the returned frequency values. The default is True for - MATLAB compatibility.""") - - -@_docstring.dedent_interpd -def psd(x, NFFT=None, Fs=None, detrend=None, window=None, - noverlap=None, pad_to=None, sides=None, scale_by_freq=None): - r""" - Compute the power spectral density. - - The power spectral density :math:`P_{xx}` by Welch's average - periodogram method. The vector *x* is divided into *NFFT* length - segments. Each segment is detrended by function *detrend* and - windowed by function *window*. *noverlap* gives the length of - the overlap between segments. The :math:`|\mathrm{fft}(i)|^2` - of each segment :math:`i` are averaged to compute :math:`P_{xx}`. - - If len(*x*) < *NFFT*, it will be zero padded to *NFFT*. - - Parameters - ---------- - x : 1-D array or sequence - Array or sequence containing the data - - %(Spectral)s - - %(PSD)s - - noverlap : int, default: 0 (no overlap) - The number of points of overlap between segments. - - Returns - ------- - Pxx : 1-D array - The values for the power spectrum :math:`P_{xx}` (real valued) - - freqs : 1-D array - The frequencies corresponding to the elements in *Pxx* - - References - ---------- - Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John - Wiley & Sons (1986) - - See Also - -------- - specgram - `specgram` differs in the default overlap; in not returning the mean of - the segment periodograms; and in returning the times of the segments. - - magnitude_spectrum : returns the magnitude spectrum. - - csd : returns the spectral density between two signals. - """ - Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend, - window=window, noverlap=noverlap, pad_to=pad_to, - sides=sides, scale_by_freq=scale_by_freq) - return Pxx.real, freqs - - -@_docstring.dedent_interpd -def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None, - noverlap=None, pad_to=None, sides=None, scale_by_freq=None): - """ - Compute the cross-spectral density. - - The cross spectral density :math:`P_{xy}` by Welch's average - periodogram method. The vectors *x* and *y* are divided into - *NFFT* length segments. Each segment is detrended by function - *detrend* and windowed by function *window*. *noverlap* gives - the length of the overlap between segments. The product of - the direct FFTs of *x* and *y* are averaged over each segment - to compute :math:`P_{xy}`, with a scaling to correct for power - loss due to windowing. - - If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero - padded to *NFFT*. - - Parameters - ---------- - x, y : 1-D arrays or sequences - Arrays or sequences containing the data - - %(Spectral)s - - %(PSD)s - - noverlap : int, default: 0 (no overlap) - The number of points of overlap between segments. - - Returns - ------- - Pxy : 1-D array - The values for the cross spectrum :math:`P_{xy}` before scaling (real - valued) - - freqs : 1-D array - The frequencies corresponding to the elements in *Pxy* - - References - ---------- - Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John - Wiley & Sons (1986) - - See Also - -------- - psd : equivalent to setting ``y = x``. 
- """ - if NFFT is None: - NFFT = 256 - Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs, - detrend_func=detrend, window=window, - noverlap=noverlap, pad_to=pad_to, - sides=sides, scale_by_freq=scale_by_freq, - mode='psd') - - if Pxy.ndim == 2: - if Pxy.shape[1] > 1: - Pxy = Pxy.mean(axis=1) - else: - Pxy = Pxy[:, 0] - return Pxy, freqs - - -_single_spectrum_docs = """\ -Compute the {quantity} of *x*. -Data is padded to a length of *pad_to* and the windowing function *window* is -applied to the signal. - -Parameters ----------- -x : 1-D array or sequence - Array or sequence containing the data - -{Spectral} - -{Single_Spectrum} - -Returns -------- -spectrum : 1-D array - The {quantity}. -freqs : 1-D array - The frequencies corresponding to the elements in *spectrum*. - -See Also --------- -psd - Returns the power spectral density. -complex_spectrum - Returns the complex-valued frequency spectrum. -magnitude_spectrum - Returns the absolute value of the `complex_spectrum`. -angle_spectrum - Returns the angle of the `complex_spectrum`. -phase_spectrum - Returns the phase (unwrapped angle) of the `complex_spectrum`. -specgram - Can return the complex spectrum of segments within the signal. -""" - - -complex_spectrum = functools.partial(_single_spectrum_helper, "complex") -complex_spectrum.__doc__ = _single_spectrum_docs.format( - quantity="complex-valued frequency spectrum", - **_docstring.interpd.params) -magnitude_spectrum = functools.partial(_single_spectrum_helper, "magnitude") -magnitude_spectrum.__doc__ = _single_spectrum_docs.format( - quantity="magnitude (absolute value) of the frequency spectrum", - **_docstring.interpd.params) -angle_spectrum = functools.partial(_single_spectrum_helper, "angle") -angle_spectrum.__doc__ = _single_spectrum_docs.format( - quantity="angle of the frequency spectrum (wrapped phase spectrum)", - **_docstring.interpd.params) -phase_spectrum = functools.partial(_single_spectrum_helper, "phase") -phase_spectrum.__doc__ = _single_spectrum_docs.format( - quantity="phase of the frequency spectrum (unwrapped phase spectrum)", - **_docstring.interpd.params) - - -@_docstring.dedent_interpd -def specgram(x, NFFT=None, Fs=None, detrend=None, window=None, - noverlap=None, pad_to=None, sides=None, scale_by_freq=None, - mode=None): - """ - Compute a spectrogram. - - Compute and plot a spectrogram of data in *x*. Data are split into - *NFFT* length segments and the spectrum of each section is - computed. The windowing function *window* is applied to each - segment, and the amount of overlap of each segment is - specified with *noverlap*. - - Parameters - ---------- - x : array-like - 1-D array or sequence. - - %(Spectral)s - - %(PSD)s - - noverlap : int, default: 128 - The number of points of overlap between blocks. - mode : str, default: 'psd' - What sort of spectrum to use: - 'psd' - Returns the power spectral density. - 'complex' - Returns the complex-valued frequency spectrum. - 'magnitude' - Returns the magnitude spectrum. - 'angle' - Returns the phase spectrum without unwrapping. - 'phase' - Returns the phase spectrum with unwrapping. - - Returns - ------- - spectrum : array-like - 2D array, columns are the periodograms of successive segments. - - freqs : array-like - 1-D array, frequencies corresponding to the rows in *spectrum*. - - t : array-like - 1-D array, the times corresponding to midpoints of segments - (i.e the columns in *spectrum*). - - See Also - -------- - psd : differs in the overlap and in the return values. 
- complex_spectrum : similar, but with complex valued frequencies. - magnitude_spectrum : similar single segment when *mode* is 'magnitude'. - angle_spectrum : similar to single segment when *mode* is 'angle'. - phase_spectrum : similar to single segment when *mode* is 'phase'. - - Notes - ----- - *detrend* and *scale_by_freq* only apply when *mode* is set to 'psd'. - - """ - if noverlap is None: - noverlap = 128 # default in _spectral_helper() is noverlap = 0 - if NFFT is None: - NFFT = 256 # same default as in _spectral_helper() - if len(x) <= NFFT: - _api.warn_external("Only one segment is calculated since parameter " - f"NFFT (={NFFT}) >= signal length (={len(x)}).") - - spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs, - detrend_func=detrend, window=window, - noverlap=noverlap, pad_to=pad_to, - sides=sides, - scale_by_freq=scale_by_freq, - mode=mode) - - if mode != 'complex': - spec = spec.real # Needed since helper implements generically - - return spec, freqs, t - - -@_docstring.dedent_interpd -def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning, - noverlap=0, pad_to=None, sides='default', scale_by_freq=None): - r""" - The coherence between *x* and *y*. Coherence is the normalized - cross spectral density: - - .. math:: - - C_{xy} = \frac{|P_{xy}|^2}{P_{xx}P_{yy}} - - Parameters - ---------- - x, y - Array or sequence containing the data - - %(Spectral)s - - %(PSD)s - - noverlap : int, default: 0 (no overlap) - The number of points of overlap between segments. - - Returns - ------- - Cxy : 1-D array - The coherence vector. - freqs : 1-D array - The frequencies for the elements in *Cxy*. - - See Also - -------- - :func:`psd`, :func:`csd` : - For information about the methods used to compute :math:`P_{xy}`, - :math:`P_{xx}` and :math:`P_{yy}`. - """ - if len(x) < 2 * NFFT: - raise ValueError( - "Coherence is calculated by averaging over *NFFT* length " - "segments. Your signal is too short for your choice of *NFFT*.") - Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, - scale_by_freq) - Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, - scale_by_freq) - Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, - scale_by_freq) - Cxy = np.abs(Pxy) ** 2 / (Pxx * Pyy) - return Cxy, f - - -class GaussianKDE: - """ - Representation of a kernel-density estimate using Gaussian kernels. - - Parameters - ---------- - dataset : array-like - Datapoints to estimate from. In case of univariate data this is a 1-D - array, otherwise a 2D array with shape (# of dims, # of data). - bw_method : str, scalar or callable, optional - The method used to calculate the estimator bandwidth. This can be - 'scott', 'silverman', a scalar constant or a callable. If a - scalar, this will be used directly as `kde.factor`. If a - callable, it should take a `GaussianKDE` instance as only - parameter and return a scalar. If None (default), 'scott' is used. - - Attributes - ---------- - dataset : ndarray - The dataset passed to the constructor. - dim : int - Number of dimensions. - num_dp : int - Number of datapoints. - factor : float - The bandwidth factor, obtained from `kde.covariance_factor`, with which - the covariance matrix is multiplied. - covariance : ndarray - The covariance matrix of *dataset*, scaled by the calculated bandwidth - (`kde.factor`). - inv_cov : ndarray - The inverse of *covariance*. - - Methods - ------- - kde.evaluate(points) : ndarray - Evaluate the estimated pdf on a provided set of points. 
- kde(points) : ndarray - Same as kde.evaluate(points) - """ - - # This implementation with minor modification was too good to pass up. - # from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py - - def __init__(self, dataset, bw_method=None): - self.dataset = np.atleast_2d(dataset) - if not np.array(self.dataset).size > 1: - raise ValueError("`dataset` input should have multiple elements.") - - self.dim, self.num_dp = np.array(self.dataset).shape - - if bw_method is None: - pass - elif cbook._str_equal(bw_method, 'scott'): - self.covariance_factor = self.scotts_factor - elif cbook._str_equal(bw_method, 'silverman'): - self.covariance_factor = self.silverman_factor - elif isinstance(bw_method, Number): - self._bw_method = 'use constant' - self.covariance_factor = lambda: bw_method - elif callable(bw_method): - self._bw_method = bw_method - self.covariance_factor = lambda: self._bw_method(self) - else: - raise ValueError("`bw_method` should be 'scott', 'silverman', a " - "scalar or a callable") - - # Computes the covariance matrix for each Gaussian kernel using - # covariance_factor(). - - self.factor = self.covariance_factor() - # Cache covariance and inverse covariance of the data - if not hasattr(self, '_data_inv_cov'): - self.data_covariance = np.atleast_2d( - np.cov( - self.dataset, - rowvar=1, - bias=False)) - self.data_inv_cov = np.linalg.inv(self.data_covariance) - - self.covariance = self.data_covariance * self.factor ** 2 - self.inv_cov = self.data_inv_cov / self.factor ** 2 - self.norm_factor = (np.sqrt(np.linalg.det(2 * np.pi * self.covariance)) - * self.num_dp) - - def scotts_factor(self): - return np.power(self.num_dp, -1. / (self.dim + 4)) - - def silverman_factor(self): - return np.power( - self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4)) - - # Default method to calculate bandwidth, can be overwritten by subclass - covariance_factor = scotts_factor - - def evaluate(self, points): - """ - Evaluate the estimated pdf on a set of points. - - Parameters - ---------- - points : (# of dimensions, # of points)-array - Alternatively, a (# of dimensions,) vector can be passed in and - treated as a single point. - - Returns - ------- - (# of points,)-array - The values at each point. - - Raises - ------ - ValueError : if the dimensionality of the input points is different - than the dimensionality of the KDE. 
- - """ - points = np.atleast_2d(points) - - dim, num_m = np.array(points).shape - if dim != self.dim: - raise ValueError("points have dimension {}, dataset has dimension " - "{}".format(dim, self.dim)) - - result = np.zeros(num_m) - - if num_m >= self.num_dp: - # there are more points than data, so loop over data - for i in range(self.num_dp): - diff = self.dataset[:, i, np.newaxis] - points - tdiff = np.dot(self.inv_cov, diff) - energy = np.sum(diff * tdiff, axis=0) / 2.0 - result = result + np.exp(-energy) - else: - # loop over points - for i in range(num_m): - diff = self.dataset - points[:, i, np.newaxis] - tdiff = np.dot(self.inv_cov, diff) - energy = np.sum(diff * tdiff, axis=0) / 2.0 - result[i] = np.sum(np.exp(-energy), axis=0) - - result = result / self.norm_factor - - return result - - __call__ = evaluate diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/data/dataset_video_train.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/data/dataset_video_train.py deleted file mode 100644 index 8a14d46a84c480ff984bd7482c2d7cc357bc9b41..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/data/dataset_video_train.py +++ /dev/null @@ -1,390 +0,0 @@ -import numpy as np -import os -import random -import torch -from pathlib import Path -import torch.utils.data as data - -import utils.utils_video as utils_video - - -class VideoRecurrentTrainDataset(data.Dataset): - """Video dataset for training recurrent networks. - - The keys are generated from a meta info txt file. - basicsr/data/meta_info/meta_info_XXX_GT.txt - - Each line contains: - 1. subfolder (clip) name; 2. frame number; 3. image shape, separated by - a white space. - Examples: - 720p_240fps_1 100 (720,1280,3) - 720p_240fps_3 100 (720,1280,3) - ... - - Key examples: "720p_240fps_1/00000" - GT (gt): Ground-Truth; - LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames. - - Args: - opt (dict): Config for train dataset. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - dataroot_flow (str, optional): Data root path for flow. - meta_info_file (str): Path for meta information file. - val_partition (str): Validation partition types. 'REDS4' or - 'official'. - io_backend (dict): IO backend type and other kwarg. - - num_frame (int): Window size for input frames. - gt_size (int): Cropped patched size for gt patches. - interval_list (list): Interval list for temporal augmentation. - random_reverse (bool): Random reverse input frames. - use_hflip (bool): Use horizontal flips. - use_rot (bool): Use rotation (use vertical flip and transposing h - and w for implementation). - - scale (bool): Scale, which will be added automatically. 
- """ - - def __init__(self, opt): - super(VideoRecurrentTrainDataset, self).__init__() - self.opt = opt - self.scale = opt.get('scale', 4) - self.gt_size = opt.get('gt_size', 256) - self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq']) - self.filename_tmpl = opt.get('filename_tmpl', '08d') - self.filename_ext = opt.get('filename_ext', 'png') - self.num_frame = opt['num_frame'] - - keys = [] - total_num_frames = [] # some clips may not have 100 frames - start_frames = [] # some clips may not start from 00000 - train_folders = os.listdir(self.lq_root) - print("TRAIN FOLDER: ", train_folders[0]) - with open(opt['meta_info_file'], 'r') as fin: - for line in fin: - folder, frame_num, _, start_frame = line.split(' ') - if folder in train_folders: - keys.extend([f'{folder}/{i:{self.filename_tmpl}}' for i in range(int(start_frame), int(start_frame)+int(frame_num))]) - total_num_frames.extend([int(frame_num) for i in range(int(frame_num))]) - start_frames.extend([int(start_frame) for i in range(int(frame_num))]) - - # remove the video clips used in validation - if opt['name'] == 'REDS': - if opt['val_partition'] == 'REDS4': - val_partition = ['000', '011', '015', '020'] - elif opt['val_partition'] == 'official': - val_partition = [f'{v:03d}' for v in range(240, 270)] - else: - raise ValueError(f'Wrong validation partition {opt["val_partition"]}.' - f"Supported ones are ['official', 'REDS4'].") - else: - val_partition = [] - - self.keys = [] - self.total_num_frames = [] # some clips may not have 100 frames - self.start_frames = [] - if opt['test_mode']: - for i, v in zip(range(len(keys)), keys): - if v.split('/')[0] in val_partition: - self.keys.append(keys[i]) - self.total_num_frames.append(total_num_frames[i]) - self.start_frames.append(start_frames[i]) - else: - for i, v in zip(range(len(keys)), keys): - if v.split('/')[0] not in val_partition: - self.keys.append(keys[i]) - self.total_num_frames.append(total_num_frames[i]) - self.start_frames.append(start_frames[i]) - - # file client (io backend) - self.file_client = None - self.io_backend_opt = opt['io_backend'] - self.is_lmdb = False - if self.io_backend_opt['type'] == 'lmdb': - self.is_lmdb = True - if hasattr(self, 'flow_root') and self.flow_root is not None: - self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root, self.flow_root] - self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow'] - else: - self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root] - self.io_backend_opt['client_keys'] = ['lq', 'gt'] - - # temporal augmentation configs - self.interval_list = opt.get('interval_list', [1]) - self.random_reverse = opt.get('random_reverse', False) - interval_str = ','.join(str(x) for x in self.interval_list) - print(f'Temporal augmentation interval list: [{interval_str}]; ' - f'random reverse is {self.random_reverse}.') - - def __getitem__(self, index): - if self.file_client is None: - self.file_client = utils_video.FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - key = self.keys[index] - total_num_frames = self.total_num_frames[index] - start_frames = self.start_frames[index] - clip_name, frame_name = key.split('/') # key example: 000/00000000 - - # determine the neighboring frames - interval = random.choice(self.interval_list) - - # ensure not exceeding the borders - start_frame_idx = int(frame_name) - endmost_start_frame_idx = start_frames + total_num_frames - self.num_frame * interval - if start_frame_idx > endmost_start_frame_idx: - start_frame_idx = 
random.randint(start_frames, endmost_start_frame_idx) - end_frame_idx = start_frame_idx + self.num_frame * interval - - neighbor_list = list(range(start_frame_idx, end_frame_idx, interval)) - - # random reverse - if self.random_reverse and random.random() < 0.5: - neighbor_list.reverse() - - # get the neighboring LQ and GT frames - img_lqs = [] - img_gts = [] - for neighbor in neighbor_list: - if self.is_lmdb: - img_lq_path = f'{clip_name}/{neighbor:{self.filename_tmpl}}' - img_gt_path = f'{clip_name}/{neighbor:{self.filename_tmpl}}' - else: - img_lq_path = self.lq_root / clip_name / f'{neighbor:{self.filename_tmpl}}.{self.filename_ext}' - img_gt_path = self.gt_root / clip_name / f'{neighbor:{self.filename_tmpl}}.{self.filename_ext}' - - # get LQ - img_bytes = self.file_client.get(img_lq_path, 'lq') - img_lq = utils_video.imfrombytes(img_bytes, float32=True) - img_lqs.append(img_lq) - - # get GT - img_bytes = self.file_client.get(img_gt_path, 'gt') - img_gt = utils_video.imfrombytes(img_bytes, float32=True) - img_gts.append(img_gt) - - # randomly crop - img_gts, img_lqs = utils_video.paired_random_crop(img_gts, img_lqs, self.gt_size, self.scale, img_gt_path) - - # augmentation - flip, rotate - img_lqs.extend(img_gts) - img_results = utils_video.augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot']) - - img_results = utils_video.img2tensor(img_results) - img_gts = torch.stack(img_results[len(img_lqs) // 2:], dim=0) - img_lqs = torch.stack(img_results[:len(img_lqs) // 2], dim=0) - - # img_lqs: (t, c, h, w) - # img_gts: (t, c, h, w) - # key: str - return {'L': img_lqs, 'H': img_gts, 'key': key} - - def __len__(self): - return len(self.keys) - - -class VideoRecurrentTrainNonblindDenoisingDataset(VideoRecurrentTrainDataset): - """Video dataset for training recurrent architectures in non-blind video denoising. - - Args: - Same as VideoTestDataset. - - """ - - def __init__(self, opt): - super(VideoRecurrentTrainNonblindDenoisingDataset, self).__init__(opt) - self.sigma_min = self.opt['sigma_min'] / 255. - self.sigma_max = self.opt['sigma_max'] / 255. 
- - def __getitem__(self, index): - if self.file_client is None: - self.file_client = utils_video.FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - key = self.keys[index] - total_num_frames = self.total_num_frames[index] - start_frames = self.start_frames[index] - clip_name, frame_name = key.split('/') # key example: 000/00000000 - - # determine the neighboring frames - interval = random.choice(self.interval_list) - - # ensure not exceeding the borders - start_frame_idx = int(frame_name) - endmost_start_frame_idx = start_frames + total_num_frames - self.num_frame * interval - if start_frame_idx > endmost_start_frame_idx: - start_frame_idx = random.randint(start_frames, endmost_start_frame_idx) - end_frame_idx = start_frame_idx + self.num_frame * interval - - neighbor_list = list(range(start_frame_idx, end_frame_idx, interval)) - - # random reverse - if self.random_reverse and random.random() < 0.5: - neighbor_list.reverse() - - # get the neighboring GT frames - img_gts = [] - for neighbor in neighbor_list: - if self.is_lmdb: - img_gt_path = f'{clip_name}/{neighbor:{self.filename_tmpl}}' - else: - img_gt_path = self.gt_root / clip_name / f'{neighbor:{self.filename_tmpl}}.{self.filename_ext}' - - # get GT - img_bytes = self.file_client.get(img_gt_path, 'gt') - img_gt = utils_video.imfrombytes(img_bytes, float32=True) - img_gts.append(img_gt) - - # randomly crop - img_gts, _ = utils_video.paired_random_crop(img_gts, img_gts, self.gt_size, 1, img_gt_path) - - # augmentation - flip, rotate - img_gts = utils_video.augment(img_gts, self.opt['use_hflip'], self.opt['use_rot']) - - img_gts = utils_video.img2tensor(img_gts) - img_gts = torch.stack(img_gts, dim=0) - - # we add noise in the network - noise_level = torch.empty((1, 1, 1, 1)).uniform_(self.sigma_min, self.sigma_max) - noise = torch.normal(mean=0, std=noise_level.expand_as(img_gts)) - img_lqs = img_gts + noise - - t, _, h, w = img_lqs.shape - img_lqs = torch.cat([img_lqs, noise_level.expand(t, 1, h, w)], 1) - - # img_lqs: (t, c, h, w) - # img_gts: (t, c, h, w) - # key: str - return {'L': img_lqs, 'H': img_gts, 'key': key} - - - def __len__(self): - return len(self.keys) - - -class VideoRecurrentTrainVimeoDataset(data.Dataset): - """Vimeo90K dataset for training recurrent networks. - - The keys are generated from a meta info txt file. - basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt - - Each line contains: - 1. clip name; 2. frame number; 3. image shape, separated by a white space. - Examples: - 00001/0001 7 (256,448,3) - 00001/0002 7 (256,448,3) - - Key examples: "00001/0001" - GT (gt): Ground-Truth; - LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames. - - The neighboring frame list for different num_frame: - num_frame | frame list - 1 | 4 - 3 | 3,4,5 - 5 | 2,3,4,5,6 - 7 | 1,2,3,4,5,6,7 - - Args: - opt (dict): Config for train dataset. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - meta_info_file (str): Path for meta information file. - io_backend (dict): IO backend type and other kwarg. - - num_frame (int): Window size for input frames. - gt_size (int): Cropped patched size for gt patches. - random_reverse (bool): Random reverse input frames. - use_hflip (bool): Use horizontal flips. - use_rot (bool): Use rotation (use vertical flip and transposing h - and w for implementation). - - scale (bool): Scale, which will be added automatically. 
- """ - - def __init__(self, opt): - super(VideoRecurrentTrainVimeoDataset, self).__init__() - self.opt = opt - self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq']) - - with open(opt['meta_info_file'], 'r') as fin: - self.keys = [line.split(' ')[0] for line in fin] - - # file client (io backend) - self.file_client = None - self.io_backend_opt = opt['io_backend'] - self.is_lmdb = False - if self.io_backend_opt['type'] == 'lmdb': - self.is_lmdb = True - self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root] - self.io_backend_opt['client_keys'] = ['lq', 'gt'] - - # indices of input images - self.neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])] - - # temporal augmentation configs - self.random_reverse = opt['random_reverse'] - print(f'Random reverse is {self.random_reverse}.') - - self.flip_sequence = opt.get('flip_sequence', False) - self.pad_sequence = opt.get('pad_sequence', False) - self.neighbor_list = [1, 2, 3, 4, 5, 6, 7] - - def __getitem__(self, index): - if self.file_client is None: - self.file_client = utils_video.FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - # random reverse - if self.random_reverse and random.random() < 0.5: - self.neighbor_list.reverse() - - scale = self.opt['scale'] - gt_size = self.opt['gt_size'] - key = self.keys[index] - clip, seq = key.split('/') # key example: 00001/0001 - - # get the neighboring LQ and GT frames - img_lqs = [] - img_gts = [] - for neighbor in self.neighbor_list: - if self.is_lmdb: - img_lq_path = f'{clip}/{seq}/im{neighbor}' - img_gt_path = f'{clip}/{seq}/im{neighbor}' - else: - img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png' - img_gt_path = self.gt_root / clip / seq / f'im{neighbor}.png' - # LQ - img_bytes = self.file_client.get(img_lq_path, 'lq') - img_lq = utils_video.imfrombytes(img_bytes, float32=True) - # GT - img_bytes = self.file_client.get(img_gt_path, 'gt') - img_gt = utils_video.imfrombytes(img_bytes, float32=True) - - img_lqs.append(img_lq) - img_gts.append(img_gt) - - # randomly crop - img_gts, img_lqs = utils_video.paired_random_crop(img_gts, img_lqs, gt_size, scale, img_gt_path) - - # augmentation - flip, rotate - img_lqs.extend(img_gts) - img_results = utils_video.augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot']) - - img_results = utils_video.img2tensor(img_results) - img_lqs = torch.stack(img_results[:7], dim=0) - img_gts = torch.stack(img_results[7:], dim=0) - - if self.flip_sequence: # flip the sequence: 7 frames to 14 frames - img_lqs = torch.cat([img_lqs, img_lqs.flip(0)], dim=0) - img_gts = torch.cat([img_gts, img_gts.flip(0)], dim=0) - elif self.pad_sequence: # pad the sequence: 7 frames to 8 frames - img_lqs = torch.cat([img_lqs, img_lqs[-1:,...]], dim=0) - img_gts = torch.cat([img_gts, img_gts[-1:,...]], dim=0) - - # img_lqs: (t, c, h, w) - # img_gt: (c, h, w) - # key: str - return {'L': img_lqs, 'H': img_gts, 'key': key} - - def __len__(self): - return len(self.keys) diff --git a/spaces/lambdalabs/LambdaSuperRes/KAIR/retinaface/layers/modules/__init__.py b/spaces/lambdalabs/LambdaSuperRes/KAIR/retinaface/layers/modules/__init__.py deleted file mode 100644 index cf24bddbf283f233d0b93fc074a2bac2f5c044a9..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/LambdaSuperRes/KAIR/retinaface/layers/modules/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .multibox_loss import MultiBoxLoss - -__all__ = ['MultiBoxLoss'] diff --git 
a/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/static/txt2img.html b/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/static/txt2img.html deleted file mode 100644 index 9dda69ed5abe85b34c130011dbce3f955ab89de4..0000000000000000000000000000000000000000 --- a/spaces/latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5/static/txt2img.html +++ /dev/null @@ -1,304 +0,0 @@ - - - - - - Real-Time Latent Consistency Model - - - - - - - - - -
-    [txt2img.html body: the HTML markup did not survive extraction; only the page text is recoverable]
-    Real-Time Latent Consistency Model
-    Text to Image
-    This demo showcases LCM Text to Image model using Diffusers with a MJPEG stream server.
-    There are 0 user(s) sharing the same GPU, affecting real-time performance. Maximum queue size is 10. Duplicate and run it on your own GPU.
-    Prompt
-    Start your session and type your prompt here, accepts Compel syntax.
-    Advanced Options (slider defaults shown on the page: 4, 50, 8.0)
    - - - \ No newline at end of file diff --git a/spaces/lazyboy450/RVCv2-Genshin/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/lazyboy450/RVCv2-Genshin/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/lazyboy450/RVCv2-Genshin/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/leilaglewis/03-Streamlit-Video-ASR-NLP/app.py b/spaces/leilaglewis/03-Streamlit-Video-ASR-NLP/app.py deleted file mode 100644 index e0f03cf2557eba112bf95ebf5eb582da8d8a0fe3..0000000000000000000000000000000000000000 --- a/spaces/leilaglewis/03-Streamlit-Video-ASR-NLP/app.py +++ /dev/null @@ -1,119 +0,0 @@ -from collections import deque -import streamlit as st -import torch -from streamlit_player import st_player -from transformers import AutoModelForCTC, Wav2Vec2Processor -from streaming import ffmpeg_stream - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') -player_options = { - "events": ["onProgress"], - "progress_interval": 200, - "volume": 1.0, - "playing": True, - "loop": False, - "controls": False, - "muted": False, - "config": {"youtube": {"playerVars": {"start": 1}}}, -} - -# disable rapid fading in and out on `st.code` updates -st.markdown("", unsafe_allow_html=True) - -@st.cache(hash_funcs={torch.nn.parameter.Parameter: lambda _: None}) -def load_model(model_path="facebook/wav2vec2-large-robust-ft-swbd-300h"): - processor = Wav2Vec2Processor.from_pretrained(model_path) - model = AutoModelForCTC.from_pretrained(model_path).to(device) - return processor, model - -processor, model = load_model() - -def stream_text(url, chunk_duration_ms, pad_duration_ms): - sampling_rate = processor.feature_extractor.sampling_rate - - # calculate the length of logits to cut from the sides of the output to account for input padding - output_pad_len = model._get_feat_extract_output_lengths(int(sampling_rate * pad_duration_ms / 1000)) - - # define the audio chunk generator - stream = ffmpeg_stream(url, sampling_rate, chunk_duration_ms=chunk_duration_ms, pad_duration_ms=pad_duration_ms) - - leftover_text = "" - for i, chunk in enumerate(stream): - input_values = processor(chunk, sampling_rate=sampling_rate, return_tensors="pt").input_values - - with torch.no_grad(): - logits = model(input_values.to(device)).logits[0] - if i > 0: - logits = logits[output_pad_len : len(logits) - output_pad_len] - else: # don't count padding at the start of the clip - logits = logits[: len(logits) - output_pad_len] - - predicted_ids = torch.argmax(logits, dim=-1).cpu().tolist() - if processor.decode(predicted_ids).strip(): - leftover_ids = processor.tokenizer.encode(leftover_text) - # concat the last word (or its part) from the last frame with the current text - text = processor.decode(leftover_ids + predicted_ids) - # don't return the last word in case it's just partially recognized - text, leftover_text = text.rsplit(" ", 1) - yield text - else: - yield leftover_text - leftover_text = "" - yield leftover_text - -def main(): - state = st.session_state - st.header("Video ASR Streamlit from Youtube Link") - - 
with st.form(key="inputs_form"): - - # Our worlds best teachers on subjects of AI, Cognitive, Neuroscience for our Behavioral and Medical Health - ytJoschaBach="https://youtu.be/cC1HszE5Hcw?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=8984" - ytSamHarris="https://www.youtube.com/watch?v=4dC_nRYIDZU&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=2" - ytJohnAbramson="https://www.youtube.com/watch?v=arrokG3wCdE&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=3" - ytElonMusk="https://www.youtube.com/watch?v=DxREm3s1scA&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=4" - ytJeffreyShainline="https://www.youtube.com/watch?v=EwueqdgIvq4&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=5" - ytJeffHawkins="https://www.youtube.com/watch?v=Z1KwkpTUbkg&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=6" - ytSamHarris="https://youtu.be/Ui38ZzTymDY?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L" - ytSamHarris="https://youtu.be/4dC_nRYIDZU?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=7809" - ytSamHarris="https://youtu.be/4dC_nRYIDZU?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=7809" - ytSamHarris="https://youtu.be/4dC_nRYIDZU?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=7809" - ytTimelapseAI="https://www.youtube.com/watch?v=63yr9dlI0cU&list=PLHgX2IExbFovQybyfltywXnqZi5YvaSS-" - state.youtube_url = st.text_input("YouTube URL", ytTimelapseAI) - - - state.chunk_duration_ms = st.slider("Audio chunk duration (ms)", 2000, 10000, 3000, 100) - state.pad_duration_ms = st.slider("Padding duration (ms)", 100, 5000, 1000, 100) - submit_button = st.form_submit_button(label="Submit") - - if submit_button or "asr_stream" not in state: - # a hack to update the video player on value changes - state.youtube_url = ( - state.youtube_url.split("&hash=")[0] - + f"&hash={state.chunk_duration_ms}-{state.pad_duration_ms}" - ) - state.asr_stream = stream_text( - state.youtube_url, state.chunk_duration_ms, state.pad_duration_ms - ) - state.chunks_taken = 0 - - - state.lines = deque([], maxlen=100) # limit to the last n lines of subs - - - player = st_player(state.youtube_url, **player_options, key="youtube_player") - - if "asr_stream" in state and player.data and player.data["played"] < 1.0: - # check how many seconds were played, and if more than processed - write the next text chunk - processed_seconds = state.chunks_taken * (state.chunk_duration_ms / 1000) - if processed_seconds < player.data["playedSeconds"]: - text = next(state.asr_stream) - state.lines.append(text) - state.chunks_taken += 1 - if "lines" in state: - # print the lines of subs - st.code("\n".join(state.lines)) - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/leogabraneth/text-generation-webui-main/extensions/character_bias/script.py b/spaces/leogabraneth/text-generation-webui-main/extensions/character_bias/script.py deleted file mode 100644 index ff12f3afdc28be4ead12ffab90bd9fbd783514a2..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/extensions/character_bias/script.py +++ /dev/null @@ -1,83 +0,0 @@ -import os - -import gradio as gr - -# get the current directory of the script -current_dir = os.path.dirname(os.path.abspath(__file__)) - -# check if the bias_options.txt file exists, if not, create it -bias_file = os.path.join(current_dir, "bias_options.txt") -if not os.path.isfile(bias_file): - with open(bias_file, "w") as f: - f.write("*I am so happy*\n*I am so sad*\n*I am so excited*\n*I am so bored*\n*I am so angry*") - -# read bias options from the text file -with open(bias_file, "r") as f: - 
bias_options = [line.strip() for line in f.readlines()] - -params = { - "activate": True, - "bias string": " *I am so happy*", - "use custom string": False, -} - - -def input_modifier(string): - """ - This function is applied to your text inputs before - they are fed into the model. - """ - return string - - -def output_modifier(string): - """ - This function is applied to the model outputs. - """ - return string - - -def bot_prefix_modifier(string): - """ - This function is only applied in chat mode. It modifies - the prefix text for the Bot and can be used to bias its - behavior. - """ - if params['activate']: - if params['use custom string']: - return f'{string} {params["custom string"].strip()} ' - else: - return f'{string} {params["bias string"].strip()} ' - else: - return string - - -def ui(): - # Gradio elements - activate = gr.Checkbox(value=params['activate'], label='Activate character bias') - dropdown_string = gr.Dropdown(choices=bias_options, value=params["bias string"], label='Character bias', info='To edit the options in this dropdown edit the "bias_options.txt" file') - use_custom_string = gr.Checkbox(value=False, label='Use custom bias textbox instead of dropdown') - custom_string = gr.Textbox(value="", placeholder="Enter custom bias string", label="Custom Character Bias", info='To use this textbox activate the checkbox above') - - # Event functions to update the parameters in the backend - def update_bias_string(x): - if x: - params.update({"bias string": x}) - else: - params.update({"bias string": dropdown_string.get()}) - return x - - def update_custom_string(x): - params.update({"custom string": x}) - - dropdown_string.change(update_bias_string, dropdown_string, None) - custom_string.change(update_custom_string, custom_string, None) - activate.change(lambda x: params.update({"activate": x}), activate, None) - use_custom_string.change(lambda x: params.update({"use custom string": x}), use_custom_string, None) - - # Group elements together depending on the selected option - def bias_string_group(): - if use_custom_string.value: - return gr.Group([use_custom_string, custom_string]) - else: - return dropdown_string diff --git a/spaces/leogabraneth/text-generation-webui-main/extensions/silero_tts/script.py b/spaces/leogabraneth/text-generation-webui-main/extensions/silero_tts/script.py deleted file mode 100644 index 234338dda16c83a797ce1b451d51c38260c0ef23..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/extensions/silero_tts/script.py +++ /dev/null @@ -1,240 +0,0 @@ -import html -import json -import random -import time -from pathlib import Path - -import gradio as gr -import torch - -from extensions.silero_tts import tts_preprocessor -from modules import chat, shared, ui_chat -from modules.utils import gradio - -torch._C._jit_set_profiling_mode(False) - - -params = { - 'activate': True, - 'speaker': 'en_56', - 'language': 'English', - 'model_id': 'v3_en', - 'sample_rate': 48000, - 'device': 'cpu', - 'show_text': False, - 'autoplay': True, - 'voice_pitch': 'medium', - 'voice_speed': 'medium', - 'local_cache_path': '' # User can override the default cache path to something other via settings.json -} - -current_params = params.copy() - -with open(Path("extensions/silero_tts/languages.json"), encoding='utf8') as f: - languages = json.load(f) - -voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high'] -voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast'] - -# Used for making text xml compatible, needed for voice pitch 
and speed control -table = str.maketrans({ - "<": "<", - ">": ">", - "&": "&", - "'": "'", - '"': """, -}) - - -def xmlesc(txt): - return txt.translate(table) - - -def load_model(): - torch_cache_path = torch.hub.get_dir() if params['local_cache_path'] == '' else params['local_cache_path'] - model_path = torch_cache_path + "/snakers4_silero-models_master/src/silero/model/" + params['model_id'] + ".pt" - if Path(model_path).is_file(): - print(f'\nUsing Silero TTS cached checkpoint found at {torch_cache_path}') - model, example_text = torch.hub.load(repo_or_dir=torch_cache_path + '/snakers4_silero-models_master/', model='silero_tts', language=languages[params['language']]["lang_id"], speaker=params['model_id'], source='local', path=model_path, force_reload=True) - else: - print(f'\nSilero TTS cache not found at {torch_cache_path}. Attempting to download...') - model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=languages[params['language']]["lang_id"], speaker=params['model_id']) - model.to(params['device']) - return model - - -def remove_tts_from_history(history): - for i, entry in enumerate(history['internal']): - history['visible'][i] = [history['visible'][i][0], entry[1]] - - return history - - -def toggle_text_in_history(history): - for i, entry in enumerate(history['visible']): - visible_reply = entry[1] - if visible_reply.startswith('')[0]}\n\n{reply}"] - else: - history['visible'][i] = [history['visible'][i][0], f"{visible_reply.split('')[0]}"] - - return history - - -def state_modifier(state): - if not params['activate']: - return state - - state['stream'] = False - return state - - -def input_modifier(string, state): - if not params['activate']: - return string - - shared.processing_message = "*Is recording a voice message...*" - return string - - -def history_modifier(history): - # Remove autoplay from the last reply - if len(history['internal']) > 0: - history['visible'][-1] = [ - history['visible'][-1][0], - history['visible'][-1][1].replace('controls autoplay>', 'controls>') - ] - - return history - - -def output_modifier(string, state): - global model, current_params, streaming_state - - for i in params: - if params[i] != current_params[i]: - model = load_model() - current_params = params.copy() - break - - if not params['activate']: - return string - - original_string = string - - string = tts_preprocessor.preprocess(html.unescape(string)) - - if string == '': - string = '*Empty reply, try regenerating*' - else: - output_file = Path(f'extensions/silero_tts/outputs/{state["character_menu"]}_{int(time.time())}.wav') - prosody = ''.format(params['voice_speed'], params['voice_pitch']) - silero_input = f'{prosody}{xmlesc(string)}' - model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file)) - - autoplay = 'autoplay' if params['autoplay'] else '' - string = f'' - if params['show_text']: - string += f'\n\n{original_string}' - - shared.processing_message = "*Is typing...*" - return string - - -def setup(): - global model - model = load_model() - - -def random_sentence(): - with open(Path("extensions/silero_tts/harvard_sentences.txt")) as f: - return random.choice(list(f)) - - -def voice_preview(string): - global model, current_params, streaming_state - - for i in params: - if params[i] != current_params[i]: - model = load_model() - current_params = params.copy() - break - - string = tts_preprocessor.preprocess(string or random_sentence()) - - output_file = 
Path('extensions/silero_tts/outputs/voice_preview.wav') - prosody = f"" - silero_input = f'{prosody}{xmlesc(string)}' - model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file)) - - return f'' - - -def language_change(lang): - global params - params.update({"language": lang, "speaker": languages[lang]["default_voice"], "model_id": languages[lang]["model_id"]}) - return gr.update(choices=languages[lang]["voices"], value=languages[lang]["default_voice"]) - - -def custom_css(): - path_to_css = Path(__file__).parent.resolve() / 'style.css' - return open(path_to_css, 'r').read() - - -def ui(): - # Gradio elements - with gr.Accordion("Silero TTS"): - with gr.Row(): - activate = gr.Checkbox(value=params['activate'], label='Activate TTS') - autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically') - - show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player') - - with gr.Row(): - language = gr.Dropdown(value=params['language'], choices=sorted(languages.keys()), label='Language') - voice = gr.Dropdown(value=params['speaker'], choices=languages[params['language']]["voices"], label='TTS voice') - with gr.Row(): - v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch') - v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed') - - with gr.Row(): - preview_text = gr.Text(show_label=False, placeholder="Preview text", elem_id="silero_preview_text") - preview_play = gr.Button("Preview") - preview_audio = gr.HTML(visible=False) - - with gr.Row(): - convert = gr.Button('Permanently replace audios with the message texts') - convert_cancel = gr.Button('Cancel', visible=False) - convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False) - - # Convert history with confirmation - convert_arr = [convert_confirm, convert, convert_cancel] - convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr) - convert_confirm.click( - lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then( - remove_tts_from_history, gradio('history'), gradio('history')).then( - chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then( - chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display')) - - convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr) - - # Toggle message text in history - show_text.change( - lambda x: params.update({"show_text": x}), show_text, None).then( - toggle_text_in_history, gradio('history'), gradio('history')).then( - chat.save_history, gradio('history', 'unique_id', 'character_menu', 'mode'), None).then( - chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display')) - - # Event functions to update the parameters in the backend - activate.change(lambda x: params.update({"activate": x}), activate, None) - autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None) - language.change(language_change, language, voice, show_progress=False) - voice.change(lambda x: params.update({"speaker": x}), voice, None) - v_pitch.change(lambda x: params.update({"voice_pitch": x}), v_pitch, None) - v_speed.change(lambda x: params.update({"voice_speed": x}), v_speed, None) - - # Play preview - preview_text.submit(voice_preview, preview_text, preview_audio) 
- preview_play.click(voice_preview, preview_text, preview_audio) diff --git a/spaces/leurez/moss/service/src/middleware/limiter.ts b/spaces/leurez/moss/service/src/middleware/limiter.ts deleted file mode 100644 index d4df1493951662750df33d24fd1c9b1b8a1618d8..0000000000000000000000000000000000000000 --- a/spaces/leurez/moss/service/src/middleware/limiter.ts +++ /dev/null @@ -1,19 +0,0 @@ -import { rateLimit } from 'express-rate-limit' -import { isNotEmptyString } from '../utils/is' - -const MAX_REQUEST_PER_HOUR = process.env.MAX_REQUEST_PER_HOUR - -const maxCount = (isNotEmptyString(MAX_REQUEST_PER_HOUR) && !isNaN(Number(MAX_REQUEST_PER_HOUR))) - ? parseInt(MAX_REQUEST_PER_HOUR) - : 0 // 0 means unlimited - -const limiter = rateLimit({ - windowMs: 60 * 60 * 1000, // Maximum number of accesses within an hour - max: maxCount, - statusCode: 200, // 200 means success,but the message is 'Too many request from this IP in 1 hour' - message: async (req, res) => { - res.send({ status: 'Fail', message: 'Too many request from this IP in 1 hour', data: null }) - }, -}) - -export { limiter } diff --git a/spaces/lewisliuX123/wechatglm_demo/README.md b/spaces/lewisliuX123/wechatglm_demo/README.md deleted file mode 100644 index 5526afb9515ec671563704bf807b90632ead99f7..0000000000000000000000000000000000000000 --- a/spaces/lewisliuX123/wechatglm_demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: wechat-bot -emoji: 👀 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -duplicated_from: lewisliuX123/wechatgpt35 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/lewtun/stable-diffusion-demo/README.md b/spaces/lewtun/stable-diffusion-demo/README.md deleted file mode 100644 index d8eecd32b32e7aa37d693fae6218f693c44e20cf..0000000000000000000000000000000000000000 --- a/spaces/lewtun/stable-diffusion-demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Diffusion Demo -emoji: 📉 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/limingcv/AlignDet/finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/fcos_r50_fpn_1x_coco.py b/spaces/limingcv/AlignDet/finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/fcos_r50_fpn_1x_coco.py deleted file mode 100644 index 81bb9c0ce60da98b80668c93fc688c7969b06067..0000000000000000000000000000000000000000 --- a/spaces/limingcv/AlignDet/finetune/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5/fcos_r50_fpn_1x_coco.py +++ /dev/null @@ -1,197 +0,0 @@ -model = dict( - type='FCOS', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='SyncBN', requires_grad=True), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5, - relu_before_extra_convs=True, - norm_cfg=dict(type='SyncBN', requires_grad=True)), - bbox_head=dict( - type='FCOSHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', 
loss_weight=1.0), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_train2017.json', - img_prefix='data/coco/train2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) - ]), - val=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ]), - test=dict( - type='CocoDataset', - ann_file='data/coco/annotations/instances_val2017.json', - img_prefix='data/coco/val2017/', - pipeline=[ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict( - type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']) - ]) - ])) -evaluation = dict( - interval=1, metric='bbox', save_best='auto', gpu_collect=True) -optimizer = dict( - type='SGD', - lr=0.015, - momentum=0.9, - weight_decay=5e-05, - paramwise_cfg=dict(bias_lr_mult=2.0, bias_decay_mult=0.0)) -optimizer_config = dict(grad_clip=dict(max_norm=35, 
norm_type=2)) -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -runner = dict(type='EpochBasedRunner', max_epochs=12) -checkpoint_config = dict(interval=1) -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) -custom_hooks = [ - dict(type='NumClassCheckHook'), - dict( - type='MMDetWandbHook', - init_kwargs=dict(project='I2B', group='finetune'), - interval=50, - num_eval_images=0, - log_checkpoint=False) -] -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = 'pretrain/selfsup_fcos_mstrain-soft-teacher_sampler-2048_temp0.5/final_model.pth' -resume_from = None -workflow = [('train', 1)] -opencv_num_threads = 0 -mp_start_method = 'fork' -auto_scale_lr = dict(enable=False, base_batch_size=16) -custom_imports = None -norm_cfg = dict(type='SyncBN', requires_grad=True) -work_dir = 'work_dirs/finetune_fcos_1x_coco_lr1.5e-2_wd5e.5' -auto_resume = False -gpu_ids = range(0, 8) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Download Film Jackie Chan Who Am I Full 11 !!HOT!!.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Download Film Jackie Chan Who Am I Full 11 !!HOT!!.md deleted file mode 100644 index 8bd42abe3e80a0314cb17b176a2771cabbed6fe4..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Download Film Jackie Chan Who Am I Full 11 !!HOT!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    download film jackie chan who am i full 11


    DOWNLOAD ✦✦✦ https://bytlly.com/2uGybs



    -
    -Download Film Jackie Chan Who Am I Full 11. film jackie chan, film jackie chan terbaru, film jackie chan terbaik, film jackie chan 2020, film ... 1fdad05405
    -
    -
    -

    diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Great Grand Masti 720p Movie Download [Extra Quality] Utorrent.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Great Grand Masti 720p Movie Download [Extra Quality] Utorrent.md deleted file mode 100644 index c18d04af3dec5dc181b6ab43f4140ecf86faadf0..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Great Grand Masti 720p Movie Download [Extra Quality] Utorrent.md +++ /dev/null @@ -1,11 +0,0 @@ -

    Great Grand Masti 720p movie download utorrent


    DOWNLOAD >> https://bytlly.com/2uGvNd



    -
    -Jul 18, 2016 - Great Grand Masti 2016 torrent HD movie download. Great Grand Masti is a 2016 Bollywood adult comedy film. The film is a sequel to the film Grand Masti ... Jun 03, 2013 - Great Grand Masti, the Indian film starring Rahul Gandhi and Kailash Kher, is the latest film from the producer and director Ajith Simran. -Great Grand Masti is an Indian comedy film released in 2016. -The film is a sequel to Grand Masti. -Great Grand Masti is based on the story of a man and woman living in a small village, who have a son and a daughter. -Grand Masti is the latest Indian movie to hit theaters. -Great Grand Masti 2016 HD movie download. 8a78ff9644
    -
    -
    -

    diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Heartless Movie Download !NEW! 720p Torrents.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Heartless Movie Download !NEW! 720p Torrents.md deleted file mode 100644 index 64bdadcf61cd4a8357d42cd48b005c911fb18457..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Heartless Movie Download !NEW! 720p Torrents.md +++ /dev/null @@ -1,7 +0,0 @@ - -

    0. With issues like these, the new Hunger Games movie is not exactly the right ticket to end a Hollywood blockbuster season. It needs more characters and problem-solving than just a rush of action. There are a few things that could have made this film a trifle better. The people who created the film would have benefited from following a few standard-pattern stories and doubling or tripling up on the characters. For example, the people working on creating the Hunger Games series developed the idea of having a handful of children raised together by a single group of people who are dedicated to protecting them and making sure that they live safely and in joy for the rest of their lives. The characters in The Hunger Games follow this pattern; we care about some of them while feeling deeply for others; and we are shown what will happen to the main character because of her choices in the film's climax. This is what will be needed to make this film even better.

    -

    None of this is to suggest that the heartless economy is an unpredicted or unpredictable outcome of economic policy; it is precisely the policy of neoliberalism that has created it. As Anthony Giddens explains, the adoption of liberal economic theory

    -

    heartless movie download 720p torrents


    Download Zip ✶✶✶ https://bytlly.com/2uGxef



    -

    The Insidious trilogy is the story of three sisters growing up in a family that has more than it can handle and seems to be falling apart. It's largely a stilted, shallow teenage soap opera. I don't care, because the psychosomatic powers of these ghosts drive the movie. It's well made, and deliberately confusing, but hardly gripping.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Inferno Hindi Dubbed Movie 1080p Hd ((FULL)).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Inferno Hindi Dubbed Movie 1080p Hd ((FULL)).md deleted file mode 100644 index eb376ba91487b10202b69cba99bb97bb4cab160b..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Inferno Hindi Dubbed Movie 1080p Hd ((FULL)).md +++ /dev/null @@ -1,87 +0,0 @@ - -

    Inferno Hindi Dubbed Movie 1080p HD: A Review

    -

    Inferno is a thriller movie based on the novel by Dan Brown, starring Tom Hanks as Robert Langdon, a professor of symbology who wakes up in a hospital in Florence with no memory of the past few days. He soon discovers that he is being hunted by a secret organization that wants to unleash a deadly virus that will wipe out half of the world's population. With the help of Dr. Sienna Brooks (Felicity Jones), a young doctor who saved his life, Langdon must follow clues hidden in Dante's Inferno, the epic poem about the nine circles of hell, to stop the sinister plot.

    -

    Inferno hindi dubbed movie 1080p hd


    DOWNLOAD ··· https://bytlly.com/2uGwcu



    -

    Inferno was released in 2016 and was directed by Ron Howard, who also directed the previous two movies based on Dan Brown's books, The Da Vinci Code and Angels & Demons. The movie was shot in various locations in Italy, Turkey and Hungary, and features stunning visuals and action sequences. The movie also stars Irrfan Khan as Harry Sims, a mysterious agent who helps Langdon; Ben Foster as Bertrand Zobrist, a billionaire geneticist who created the virus; and Omar Sy as Christoph Bouchard, a French agent who is after Langdon.

    -

    Inferno hindi dubbed movie 1080p hd is available for download or streaming on various platforms online. You can watch this movie in high definition quality with clear audio and subtitles in Hindi. Inferno hindi dubbed movie 1080p hd is a great choice for fans of thriller and mystery movies, as well as those who enjoy Dan Brown's novels and Tom Hanks' performances. The movie will keep you on the edge of your seat as you follow Langdon's journey through hell to save the world.

    -

    What to Expect from Inferno Hindi Dubbed Movie 1080p HD

    -

    Here are some of the things you can expect from Inferno hindi dubbed movie 1080p hd:

    -
      -
    • A fast-paced and gripping story that combines history, art, literature and science.
    • -
    • A brilliant performance by Tom Hanks as Robert Langdon, who shows his intelligence, courage and humor.
    • -
    • A strong supporting cast that includes Felicity Jones, Irrfan Khan, Ben Foster and Omar Sy.
    • -
    • A stunning cinematography that showcases the beauty and culture of Italy, Turkey and Hungary.
    • -
    • A thrilling soundtrack that matches the mood and tone of the movie.
    • -
    • A twisty and surprising ending that will leave you speechless.
    • -
    -

    How to Download or Stream Inferno Hindi Dubbed Movie 1080p HD

    -

    If you want to watch Inferno hindi dubbed movie 1080p hd, you have several options to choose from. You can either download the movie from a reliable source or stream it online from a legal platform. Here are some of the ways you can do that:

    -

    -
      -
    1. Download Inferno hindi dubbed movie 1080p hd from mkvCinemas.Com. This website offers high-quality movies in various formats and sizes. You can download Inferno hindi dubbed movie 1080p hd in BluRay quality with dual audio in Hindi and English. You can also choose from different resolutions such as 480p, 720p or 1080p. To download Inferno hindi dubbed movie 1080p hd from mkvCinemas.Com, you need to follow these steps:
    2. -
        -
      1. Go to https://ww4.mkvcinemas.lat/inferno-2016-movie-brrip-english-esub-300mb-480p-1gb-720p-f/
      2. -
      3. Scroll down to find the G-Drive [GDToT] Links section.
      4. -
      5. Select the resolution and size you want to download.
      6. -
      7. Click on the link and wait for a few seconds.
      8. -
      9. Verify that you are not a robot by completing a captcha.
      10. -
      11. Click on Download Now and enjoy your movie.
      12. -
      -
    3. Stream Inferno hindi dubbed movie 1080p hd from todaymovie.org. This website offers free online streaming of movies in HD quality with Hindi dubbing. You can watch Inferno hindi dubbed movie 1080p hd without any registration or subscription. You can also choose from different servers to stream the movie smoothly. To stream Inferno hindi dubbed movie 1080p hd from todaymovie.org, you need to follow these steps:
    4. -
        -
      1. Go to https://www.todaymovie.org/inferno-1999-hindi-dubbed/
      2. -
      3. Scroll down to find the player section.
      4. -
      5. Select the server you want to stream from.
      6. -
      7. Click on Play and enjoy your movie.
      8. -
      -

    -

    Why You Should Watch Inferno Hindi Dubbed Movie 1080p HD

    -

    Inferno hindi dubbed movie 1080p hd is a movie that will keep you entertained and engaged from start to finish. Here are some of the reasons why you should watch this movie:

    -
    • It is based on a best-selling novel by Dan Brown, who is known for his thrilling and suspenseful stories that combine history, art, literature and science.
    • It features Tom Hanks, one of the most popular and versatile actors in Hollywood, who delivers a convincing and charismatic performance as Robert Langdon.
    • It has a captivating plot that involves a deadly virus, a secret organization, a global conspiracy and a race against time to save the world.
    • It has a lot of twists and turns that will keep you guessing and surprised until the end.
    • It has a lot of references and clues to Dante's Inferno, the epic poem about the nine circles of hell, which will make you curious and interested in learning more about it.
    -
    How to Enjoy Inferno Hindi Dubbed Movie 1080p HD
    -

    Inferno hindi dubbed movie 1080p hd is a movie that you can enjoy in different ways. Here are some of the tips on how to enjoy this movie:

    -
    • Watch it with your friends or family who like thriller and mystery movies. You can discuss and share your opinions and insights about the movie.
    • Watch it with headphones or speakers to enjoy the thrilling soundtrack that matches the mood and tone of the movie.
    • Watch it on a big screen or a high-resolution device to appreciate the stunning cinematography that showcases the beauty and culture of Italy, Turkey and Hungary.
    • Watch it with subtitles in Hindi to understand the dialogues and follow the story better.
    • Watch it more than once to catch all the details and clues that you might have missed the first time.
    -
    The Challenges of Making Inferno Hindi Dubbed Movie 1080p HD
    -

    Inferno hindi dubbed movie 1080p hd is a movie that faced many challenges during its production and release. Here are some of the challenges that the movie had to overcome:

    -
    • The movie was based on a controversial novel by Dan Brown, who is known for his controversial and provocative stories that challenge religious beliefs and historical facts.
    • The movie had to deal with a complex and intricate plot that involved many characters, locations, clues and codes that had to be translated and adapted for different languages and cultures.
    • The movie had to compete with other blockbuster movies that were released in the same year, such as Doctor Strange, Fantastic Beasts and Where to Find Them, and Rogue One: A Star Wars Story.
    • The movie had to face criticism and backlash from some critics and audiences who found the movie boring, confusing, or disappointing compared to the novel or the previous movies.
    • The movie had to cope with the health issues of Tom Hanks, who was diagnosed with type 2 diabetes in 2013 and had to lose weight and manage his blood sugar levels during the filming.
    -
    The Future of Inferno Hindi Dubbed Movie 1080p HD
    -

    Inferno hindi dubbed movie 1080p hd is a movie that has a lot of potential for the future. Here are some of the possibilities for the future of this movie:

    -
    • The movie could have a sequel based on the next novel by Dan Brown, Origin, which was published in 2017 and features Robert Langdon as the protagonist again.
    • The movie could have a spin-off based on the character of Harry Sims, played by Irrfan Khan, who was a mysterious agent who helped Langdon in the movie.
    • The movie could have a remake or a reboot with a different cast and director, who could bring a fresh perspective and vision to the story.
    • The movie could have a TV series or a web series that could explore more details and aspects of the story, such as the history, art, literature and science behind Dante's Inferno.
    • The movie could have a video game or a mobile game that could allow the players to interact with the story and solve the puzzles and codes themselves.
    -
    The Reviews of Inferno Hindi Dubbed Movie 1080p HD
    -

    Inferno hindi dubbed movie 1080p hd is a movie that has received mixed reviews from critics and audiences. Here are some of the reviews of this movie:

    -
    • Roger Ebert of Chicago Sun-Times gave the movie 2 out of 4 stars and wrote: "Inferno is filled with such bewildering twists and turns that even with Tom Hanks as our guide, we are hard-pressed to follow the convoluted plot."
    • Richard Roeper of Chicago Tribune gave the movie 3 out of 4 stars and wrote: "Inferno is a solid thriller that delivers on its promise of globe-trotting intrigue, puzzles and codes, and a race against time to save humanity."
    • Shubhra Gupta of The Indian Express gave the movie 1.5 out of 5 stars and wrote: "Inferno is a dull and disappointing movie that wastes the talent of Tom Hanks and Irrfan Khan, and fails to live up to the expectations of Dan Brown's fans."
    • Rajeev Masand of CNN-News18 gave the movie 2 out of 5 stars and wrote: "Inferno is a mediocre and forgettable movie that lacks the thrill and excitement of the previous movies based on Dan Brown's books."
    -
    The Conclusion of Inferno Hindi Dubbed Movie 1080p HD
    -

    Inferno hindi dubbed movie 1080p hd is a movie that you can watch if you are a fan of thriller and mystery movies, or if you are interested in Dante's Inferno and its references. However, you might be disappointed if you are looking for a faithful adaptation of Dan Brown's novel, or if you are expecting a high-quality and original movie. Inferno hindi dubbed movie 1080p hd is a movie that has its pros and cons, but ultimately it is up to you to decide whether you want to watch it or not.

    -
    In Conclusion
    -

    In conclusion, Inferno hindi dubbed movie 1080p hd is a movie that has its strengths and weaknesses. It is based on a novel by Dan Brown, who is known for his thrilling and suspenseful stories that combine history, art, literature and science. It stars Tom Hanks as Robert Langdon, who tries to stop a global pandemic caused by a virus inspired by Dante's Inferno. It also features a strong supporting cast that includes Felicity Jones, Irrfan Khan, Ben Foster and Omar Sy. It has a captivating plot that involves puzzles, codes and symbols. It has stunning visuals and action sequences. It has a twisty and surprising ending. However, it also has many flaws and challenges. It is not faithful to the novel and changes many details and aspects of the story. It is confusing and complicated for some viewers who might not be familiar with Dante's Inferno or its references. It is boring and disappointing for some critics and audiences who expected more from the movie. It faced competition from other blockbuster movies that were released in the same year. It had to cope with the health issues of Tom Hanks, who had to lose weight and manage his blood sugar levels during the filming. Therefore, Inferno hindi dubbed movie 1080p hd is a movie that you can watch if you are interested in thriller and mystery movies, or if you want to learn more about Dante's Inferno and its references. However, you might not enjoy it if you are looking for a faithful adaptation of Dan Brown's novel, or if you want a high-quality and original movie. Inferno hindi dubbed movie 1080p hd is a movie that has its pros and cons, but ultimately it is up to you to decide whether you want to watch it or not.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/lingbionlp/PhenoTagger-Demo/src/post_processing.py b/spaces/lingbionlp/PhenoTagger-Demo/src/post_processing.py deleted file mode 100644 index e1d91ec2183471f4ef44eeeb992632addf941e2f..0000000000000000000000000000000000000000 --- a/spaces/lingbionlp/PhenoTagger-Demo/src/post_processing.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Jun 18 20:08:30 2020 - -@author: luol2 -""" - -def combine_overlap(mention_list): - - entity_list=[] - if len(mention_list)>2: - - first_entity=mention_list[0] - nest_list=[first_entity] - max_eid=int(first_entity[1]) - for i in range(1,len(mention_list)): - segs=mention_list[i] - if int(segs[0])> max_eid: - if len(nest_list)==1: - entity_list.append(nest_list[0]) - nest_list=[] - nest_list.append(segs) - if int(segs[1])>max_eid: - max_eid=int(segs[1]) - else: - tem=find_max_entity(nest_list)#find max entity - entity_list.append(tem) - nest_list=[] - nest_list.append(segs) - if int(segs[1])>max_eid: - max_eid=int(segs[1]) - - else: - nest_list.append(segs) - if int(segs[1])>max_eid: - max_eid=int(segs[1]) - if nest_list!=[]: - if len(nest_list)==1: - entity_list.append(nest_list[0]) - - else: - tem=find_max_entity(nest_list)#find max entity - entity_list.append(tem) - else: - entity_list=mention_list - - return entity_list - -def find_max_entity(nest_list): - max_len=0 - max_entity=[] - for i in range(0, len(nest_list)): - length=int(nest_list[i][1])-int(nest_list[i][0]) - if length>max_len: - max_len=length - max_entity=nest_list[i] - - return max_entity \ No newline at end of file diff --git a/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/hifigan/models.py b/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/hifigan/models.py deleted file mode 100644 index 9747301f350bb269e62601017fe4633ce271b27e..0000000000000000000000000000000000000000 --- a/spaces/lllqqq/so-vits-svc-models-pcr/vdecoder/hifigan/models.py +++ /dev/null @@ -1,503 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np -import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - 
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -def padDiff(x): - return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0) - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. 
- # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (padDiff(tmp_over_one)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. - i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . 
for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - 
self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 
41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/locknsw/nomic-ai-gpt4all-13b-snoozy/app.py b/spaces/locknsw/nomic-ai-gpt4all-13b-snoozy/app.py deleted file mode 100644 index 09d5b9bc4fea472062d0686382f48dc0cd1a3512..0000000000000000000000000000000000000000 --- a/spaces/locknsw/nomic-ai-gpt4all-13b-snoozy/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/nomic-ai/gpt4all-13b-snoozy").launch() \ No newline at end of file diff --git a/spaces/ltg/chat-nort5/norquad/README.md b/spaces/ltg/chat-nort5/norquad/README.md deleted file mode 100644 index 85231150b79e27284dc4f2877e2ba6faecd65049..0000000000000000000000000000000000000000 --- a/spaces/ltg/chat-nort5/norquad/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -language: -- 'no' -- nb -- nn -inference: false -tags: -- BERT -- NorBERT -- Norwegian -- encoder -license: cc-by-4.0 ---- - -# NorBERT 3 large - - -## Other sizes: -- [NorBERT 3 xs (15M)](https://huggingface.co/ltg/norbert3-xs) -- [NorBERT 3 small (40M)](https://huggingface.co/ltg/norbert3-small) -- [NorBERT 3 base (123M)](https://huggingface.co/ltg/norbert3-base) -- [NorBERT 3 large (323M)](https://huggingface.co/ltg/norbert3-large) - - -## Example usage - -This model currently needs a custom wrapper from `modeling_norbert.py`. 
Then you can use it like this: - -```python -import torch -from transformers import AutoTokenizer -from modeling_norbert import NorbertForMaskedLM - -tokenizer = AutoTokenizer.from_pretrained("path/to/folder") -bert = NorbertForMaskedLM.from_pretrained("path/to/folder") - -mask_id = tokenizer.convert_tokens_to_ids("[MASK]") -input_text = tokenizer("Nå ønsker de seg en[MASK] bolig.", return_tensors="pt") -output_p = bert(**input_text) -output_text = torch.where(input_text.input_ids == mask_id, output_p.logits.argmax(-1), input_text.input_ids) - -# should output: '[CLS] Nå ønsker de seg en ny bolig.[SEP]' -print(tokenizer.decode(output_text[0].tolist())) -``` - -The following classes are currently implemented: `NorbertForMaskedLM`, `NorbertForSequenceClassification`, `NorbertForTokenClassification`, `NorbertForQuestionAnswering` and `NorbertForMultipleChoice`. \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/thrust/thrust/allocate_unique.h b/spaces/ma-xu/LIVE/thrust/thrust/allocate_unique.h deleted file mode 100644 index 6e67d1b18a6dd8c4e8dd27a0f78531819489d6a4..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/allocate_unique.h +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright (c) 2018 NVIDIA Corporation -// Author: Bryce Adelstein Lelbach -// -// Distributed under the Boost Software License v1.0 (boost.org/LICENSE_1_0.txt) - -#pragma once - -#include -#include - -#if THRUST_CPP_DIALECT >= 2011 - -#include -#include -#include -#include - -#include -#include - -namespace thrust -{ - -// wg21.link/p0316r0 - -/////////////////////////////////////////////////////////////////////////////// - -namespace detail -{ - -template -void allocator_delete_impl( - Allocator const& alloc, Pointer p, std::false_type -) -{ - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >; - - typename traits::allocator_type alloc_T(alloc); - - if (nullptr != pointer_traits::get(p)) - { - traits::destroy(alloc_T, thrust::raw_pointer_cast(p)); - traits::deallocate(alloc_T, p, 1); - } -} - -template -void allocator_delete_impl( - Allocator const& alloc, Pointer p, std::true_type -) -{ - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >; - - typename traits::allocator_type alloc_T(alloc); - - if (nullptr != pointer_traits::get(p)) - { - traits::deallocate(alloc_T, p, 1); - } -} - -} // namespace detail - -template -struct allocator_delete final -{ - using allocator_type - = typename std::remove_cv< - typename std::remove_reference::type - >::type::template rebind::other; - using pointer = typename detail::allocator_traits::pointer; - - template - allocator_delete(UAllocator&& other) noexcept - : alloc_(THRUST_FWD(other)) - {} - - template - allocator_delete( - allocator_delete const& other - ) noexcept - : alloc_(other.get_allocator()) - {} - template - allocator_delete( - allocator_delete&& other - ) noexcept - : alloc_(std::move(other.get_allocator())) - {} - - template - allocator_delete& operator=( - allocator_delete const& other - ) noexcept - { - alloc_ = other.get_allocator(); - return *this; - } - template - allocator_delete& operator=( - allocator_delete&& other - ) noexcept - { - alloc_ = std::move(other.get_allocator()); - return *this; - } - - void operator()(pointer p) - { - std::integral_constant ic; - - detail::allocator_delete_impl(get_allocator(), p, ic); - } - - allocator_type& get_allocator() noexcept { return 
alloc_; } - allocator_type const& get_allocator() const noexcept { return alloc_; } - - void swap(allocator_delete& other) noexcept - { - using std::swap; - swap(alloc_, other.alloc_); - } - -private: - allocator_type alloc_; -}; - -template -using uninitialized_allocator_delete = allocator_delete; - -namespace detail { - -template -void array_allocator_delete_impl( - Allocator const& alloc, Pointer p, Size count, std::false_type -) -{ - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >; - - typename traits::allocator_type alloc_T(alloc); - - if (nullptr != pointer_traits::get(p)) - { - destroy_n(alloc_T, p, count); - traits::deallocate(alloc_T, p, count); - } -} - -template -void array_allocator_delete_impl( - Allocator const& alloc, Pointer p, Size count, std::true_type -) -{ - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >; - - typename traits::allocator_type alloc_T(alloc); - - if (nullptr != pointer_traits::get(p)) - { - traits::deallocate(alloc_T, p, count); - } -} - -} // namespace detail - -template -struct array_allocator_delete final -{ - using allocator_type - = typename std::remove_cv< - typename std::remove_reference::type - >::type::template rebind::other; - using pointer = typename detail::allocator_traits::pointer; - - template - array_allocator_delete(UAllocator&& other, std::size_t n) noexcept - : alloc_(THRUST_FWD(other)), count_(n) - {} - - template - array_allocator_delete( - array_allocator_delete const& other - ) noexcept - : alloc_(other.get_allocator()), count_(other.count_) - {} - template - array_allocator_delete( - array_allocator_delete&& other - ) noexcept - : alloc_(std::move(other.get_allocator())), count_(other.count_) - {} - - template - array_allocator_delete& operator=( - array_allocator_delete const& other - ) noexcept - { - alloc_ = other.get_allocator(); - count_ = other.count_; - return *this; - } - template - array_allocator_delete& operator=( - array_allocator_delete&& other - ) noexcept - { - alloc_ = std::move(other.get_allocator()); - count_ = other.count_; - return *this; - } - - void operator()(pointer p) - { - std::integral_constant ic; - - detail::array_allocator_delete_impl(get_allocator(), p, count_, ic); - } - - allocator_type& get_allocator() noexcept { return alloc_; } - allocator_type const& get_allocator() const noexcept { return alloc_; } - - void swap(array_allocator_delete& other) noexcept - { - using std::swap; - swap(alloc_, other.alloc_); - swap(count_, other.count_); - } - -private: - allocator_type alloc_; - std::size_t count_; -}; - -template -using uninitialized_array_allocator_delete - = array_allocator_delete; - -/////////////////////////////////////////////////////////////////////////////// - -template -struct tagged_deleter : Lambda -{ - __host__ __device__ - tagged_deleter(Lambda&& l) : Lambda(THRUST_FWD(l)) {} - - using pointer = Pointer; -}; - -template -__host__ __device__ -tagged_deleter -make_tagged_deleter(Lambda&& l) -{ - return tagged_deleter(THRUST_FWD(l)); -} - -/////////////////////////////////////////////////////////////////////////////// - -template -__host__ -std::unique_ptr< - T, - allocator_delete< - T - , typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits::allocator_type - > -> -allocate_unique( - Allocator const& alloc, Args&&... 
args -) -{ - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits; - - typename traits::allocator_type alloc_T(alloc); - - auto hold_deleter = make_tagged_deleter( - [&alloc_T] (typename traits::pointer p) { - traits::deallocate(alloc_T, p, 1); - } - ); - using hold_t = std::unique_ptr; - auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter); - - traits::construct( - alloc_T, thrust::raw_pointer_cast(hold.get()), THRUST_FWD(args)... - ); - auto deleter = allocator_delete(alloc); - return std::unique_ptr - (hold.release(), std::move(deleter)); -} - -template -__host__ -std::unique_ptr< - T, - uninitialized_allocator_delete< - T - , typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits::allocator_type - > -> -uninitialized_allocate_unique( - Allocator const& alloc -) -{ - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits; - - typename traits::allocator_type alloc_T(alloc); - - auto hold_deleter = make_tagged_deleter( - [&alloc_T] (typename traits::pointer p) { - traits::deallocate(alloc_T, p, 1); - } - ); - using hold_t = std::unique_ptr; - auto hold = hold_t(traits::allocate(alloc_T, 1), hold_deleter); - - auto deleter = uninitialized_allocator_delete< - T, typename traits::allocator_type - >(alloc_T); - return std::unique_ptr - (hold.release(), std::move(deleter)); -} - -template -__host__ -std::unique_ptr< - T[], - array_allocator_delete< - T - , typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits::allocator_type - > -> -allocate_unique_n( - Allocator const& alloc, Size n, Args&&... args -) -{ - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits; - - typename traits::allocator_type alloc_T(alloc); - - auto hold_deleter = make_tagged_deleter( - [n, &alloc_T] (typename traits::pointer p) { - traits::deallocate(alloc_T, p, n); - } - ); - using hold_t = std::unique_ptr; - auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter); - - uninitialized_construct_n_with_allocator( - alloc_T, hold.get(), n, THRUST_FWD(args)... 
- ); - auto deleter = array_allocator_delete< - T, typename traits::allocator_type - >(alloc_T, n); - return std::unique_ptr - (hold.release(), std::move(deleter)); -} - -template -__host__ -std::unique_ptr< - T[], - uninitialized_array_allocator_delete< - T - , typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits::allocator_type - > -> -uninitialized_allocate_unique_n( - Allocator const& alloc, Size n -) -{ - using traits = typename detail::allocator_traits< - typename std::remove_cv< - typename std::remove_reference::type - >::type - >::template rebind_traits; - - typename traits::allocator_type alloc_T(alloc); - - auto hold_deleter = make_tagged_deleter( - [n, &alloc_T] (typename traits::pointer p) { - traits::deallocate(alloc_T, p, n); - } - ); - using hold_t = std::unique_ptr; - auto hold = hold_t(traits::allocate(alloc_T, n), hold_deleter); - - auto deleter = uninitialized_array_allocator_delete< - T, typename traits::allocator_type - >(alloc_T, n); - return std::unique_ptr - (hold.release(), std::move(deleter)); -} - -/////////////////////////////////////////////////////////////////////////////// - -} // end namespace thrust - -#endif // THRUST_CPP_DIALECT >= 2011 - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/per_device_resource.h b/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/per_device_resource.h deleted file mode 100644 index 1b8d61f92169e0e09c3821e59218f0dcbb70cbe5..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/per_device_resource.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2018 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// this system has no special per device resource functions - diff --git a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/AdaptiveWingLoss/core/__init__.py b/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/AdaptiveWingLoss/core/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/matthoffner/chatbot/SECURITY.md b/spaces/matthoffner/chatbot/SECURITY.md deleted file mode 100644 index 42f79949474efbc61815647263aa005708780d22..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot/SECURITY.md +++ /dev/null @@ -1,53 +0,0 @@ -# Security Policy - - -This security policy outlines the process for reporting vulnerabilities and secrets found within this GitHub repository. It is essential that all contributors and users adhere to this policy in order to maintain a secure and stable environment. - -## Reporting a Vulnerability - -If you discover a vulnerability within the code, dependencies, or any other component of this repository, please follow these steps: - -1. 
**Do not disclose the vulnerability publicly.** Publicly disclosing a vulnerability may put the project at risk and could potentially harm other users. - -2. **Contact the repository maintainer(s) privately.** Send a private message or email to the maintainer(s) with a detailed description of the vulnerability. Include the following information: - - - The affected component(s) - - Steps to reproduce the issue - - Potential impact of the vulnerability - - Any possible mitigations or workarounds - -3. **Wait for a response from the maintainer(s).** Please be patient, as they may need time to investigate and verify the issue. The maintainer(s) should acknowledge receipt of your report and provide an estimated time frame for addressing the vulnerability. - -4. **Cooperate with the maintainer(s).** If requested, provide additional information or assistance to help resolve the issue. - -5. **Do not disclose the vulnerability until the maintainer(s) have addressed it.** Once the issue has been resolved, the maintainer(s) may choose to publicly disclose the vulnerability and credit you for the discovery. - -## Reporting Secrets - -If you discover any secrets, such as API keys or passwords, within the repository, follow these steps: - -1. **Do not share the secret or use it for unauthorized purposes.** Misusing a secret could have severe consequences for the project and its users. - -2. **Contact the repository maintainer(s) privately.** Notify them of the discovered secret, its location, and any potential risks associated with it. - -3. **Wait for a response and further instructions.** - -## Responsible Disclosure - -We encourage responsible disclosure of vulnerabilities and secrets. If you follow the steps outlined in this policy, we will work with you to understand and address the issue. We will not take legal action against individuals who discover and report vulnerabilities or secrets in accordance with this policy. - -## Patching and Updates - -We are committed to maintaining the security of our project. When vulnerabilities are reported and confirmed, we will: - -1. Work diligently to develop and apply a patch or implement a mitigation strategy. -2. Keep the reporter informed about the progress of the fix. -3. Update the repository with the necessary patches and document the changes in the release notes or changelog. -4. Credit the reporter for the discovery, if they wish to be acknowledged. - -## Contributing to Security - -We welcome contributions that help improve the security of our project. If you have suggestions or want to contribute code to address security issues, please follow the standard contribution guidelines for this repository. When submitting a pull request related to security, please mention that it addresses a security issue and provide any necessary context. - -By adhering to this security policy, you contribute to the overall security and stability of the project. Thank you for your cooperation and responsible handling of vulnerabilities and secrets. 
- diff --git a/spaces/megaaziib/hololive-rvc-models-v2/rmvpe.py b/spaces/megaaziib/hololive-rvc-models-v2/rmvpe.py deleted file mode 100644 index 3ad346141340e03bdbaa20121e1ed435bb3da57a..0000000000000000000000000000000000000000 --- a/spaces/megaaziib/hololive-rvc-models-v2/rmvpe.py +++ /dev/null @@ -1,432 +0,0 @@ -import sys, torch, numpy as np, traceback, pdb -import torch.nn as nn -from time import time as ttime -import torch.nn.functional as F - - -class BiGRU(nn.Module): - def __init__(self, input_features, hidden_features, num_layers): - super(BiGRU, self).__init__() - self.gru = nn.GRU( - input_features, - hidden_features, - num_layers=num_layers, - batch_first=True, - bidirectional=True, - ) - - def forward(self, x): - return self.gru(x)[0] - - -class ConvBlockRes(nn.Module): - def __init__(self, in_channels, out_channels, momentum=0.01): - super(ConvBlockRes, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - nn.Conv2d( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=(1, 1), - padding=(1, 1), - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - if in_channels != out_channels: - self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1)) - self.is_shortcut = True - else: - self.is_shortcut = False - - def forward(self, x): - if self.is_shortcut: - return self.conv(x) + self.shortcut(x) - else: - return self.conv(x) + x - - -class Encoder(nn.Module): - def __init__( - self, - in_channels, - in_size, - n_encoders, - kernel_size, - n_blocks, - out_channels=16, - momentum=0.01, - ): - super(Encoder, self).__init__() - self.n_encoders = n_encoders - self.bn = nn.BatchNorm2d(in_channels, momentum=momentum) - self.layers = nn.ModuleList() - self.latent_channels = [] - for i in range(self.n_encoders): - self.layers.append( - ResEncoderBlock( - in_channels, out_channels, kernel_size, n_blocks, momentum=momentum - ) - ) - self.latent_channels.append([out_channels, in_size]) - in_channels = out_channels - out_channels *= 2 - in_size //= 2 - self.out_size = in_size - self.out_channel = out_channels - - def forward(self, x): - concat_tensors = [] - x = self.bn(x) - for i in range(self.n_encoders): - _, x = self.layers[i](x) - concat_tensors.append(_) - return x, concat_tensors - - -class ResEncoderBlock(nn.Module): - def __init__( - self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01 - ): - super(ResEncoderBlock, self).__init__() - self.n_blocks = n_blocks - self.conv = nn.ModuleList() - self.conv.append(ConvBlockRes(in_channels, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv.append(ConvBlockRes(out_channels, out_channels, momentum)) - self.kernel_size = kernel_size - if self.kernel_size is not None: - self.pool = nn.AvgPool2d(kernel_size=kernel_size) - - def forward(self, x): - for i in range(self.n_blocks): - x = self.conv[i](x) - if self.kernel_size is not None: - return x, self.pool(x) - else: - return x - - -class Intermediate(nn.Module): # - def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01): - super(Intermediate, self).__init__() - self.n_inters = n_inters - self.layers = nn.ModuleList() - self.layers.append( - ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum) - ) - for i in range(self.n_inters - 1): - 
self.layers.append( - ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum) - ) - - def forward(self, x): - for i in range(self.n_inters): - x = self.layers[i](x) - return x - - -class ResDecoderBlock(nn.Module): - def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01): - super(ResDecoderBlock, self).__init__() - out_padding = (0, 1) if stride == (1, 2) else (1, 1) - self.n_blocks = n_blocks - self.conv1 = nn.Sequential( - nn.ConvTranspose2d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=(3, 3), - stride=stride, - padding=(1, 1), - output_padding=out_padding, - bias=False, - ), - nn.BatchNorm2d(out_channels, momentum=momentum), - nn.ReLU(), - ) - self.conv2 = nn.ModuleList() - self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum)) - for i in range(n_blocks - 1): - self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum)) - - def forward(self, x, concat_tensor): - x = self.conv1(x) - x = torch.cat((x, concat_tensor), dim=1) - for i in range(self.n_blocks): - x = self.conv2[i](x) - return x - - -class Decoder(nn.Module): - def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01): - super(Decoder, self).__init__() - self.layers = nn.ModuleList() - self.n_decoders = n_decoders - for i in range(self.n_decoders): - out_channels = in_channels // 2 - self.layers.append( - ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum) - ) - in_channels = out_channels - - def forward(self, x, concat_tensors): - for i in range(self.n_decoders): - x = self.layers[i](x, concat_tensors[-1 - i]) - return x - - -class DeepUnet(nn.Module): - def __init__( - self, - kernel_size, - n_blocks, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(DeepUnet, self).__init__() - self.encoder = Encoder( - in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels - ) - self.intermediate = Intermediate( - self.encoder.out_channel // 2, - self.encoder.out_channel, - inter_layers, - n_blocks, - ) - self.decoder = Decoder( - self.encoder.out_channel, en_de_layers, kernel_size, n_blocks - ) - - def forward(self, x): - x, concat_tensors = self.encoder(x) - x = self.intermediate(x) - x = self.decoder(x, concat_tensors) - return x - - -class E2E(nn.Module): - def __init__( - self, - n_blocks, - n_gru, - kernel_size, - en_de_layers=5, - inter_layers=4, - in_channels=1, - en_out_channels=16, - ): - super(E2E, self).__init__() - self.unet = DeepUnet( - kernel_size, - n_blocks, - en_de_layers, - inter_layers, - in_channels, - en_out_channels, - ) - self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1)) - if n_gru: - self.fc = nn.Sequential( - BiGRU(3 * 128, 256, n_gru), - nn.Linear(512, 360), - nn.Dropout(0.25), - nn.Sigmoid(), - ) - else: - self.fc = nn.Sequential( - nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid() - ) - - def forward(self, mel): - mel = mel.transpose(-1, -2).unsqueeze(1) - x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2) - x = self.fc(x) - return x - - -from librosa.filters import mel - - -class MelSpectrogram(torch.nn.Module): - def __init__( - self, - is_half, - n_mel_channels, - sampling_rate, - win_length, - hop_length, - n_fft=None, - mel_fmin=0, - mel_fmax=None, - clamp=1e-5, - ): - super().__init__() - n_fft = win_length if n_fft is None else n_fft - self.hann_window = {} - mel_basis = mel( - sr=sampling_rate, - n_fft=n_fft, - n_mels=n_mel_channels, - fmin=mel_fmin, - fmax=mel_fmax, - htk=True, - ) - 
mel_basis = torch.from_numpy(mel_basis).float() - self.register_buffer("mel_basis", mel_basis) - self.n_fft = win_length if n_fft is None else n_fft - self.hop_length = hop_length - self.win_length = win_length - self.sampling_rate = sampling_rate - self.n_mel_channels = n_mel_channels - self.clamp = clamp - self.is_half = is_half - - def forward(self, audio, keyshift=0, speed=1, center=True): - factor = 2 ** (keyshift / 12) - n_fft_new = int(np.round(self.n_fft * factor)) - win_length_new = int(np.round(self.win_length * factor)) - hop_length_new = int(np.round(self.hop_length * speed)) - keyshift_key = str(keyshift) + "_" + str(audio.device) - if keyshift_key not in self.hann_window: - self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to( - audio.device - ) - fft = torch.stft( - audio, - n_fft=n_fft_new, - hop_length=hop_length_new, - win_length=win_length_new, - window=self.hann_window[keyshift_key], - center=center, - return_complex=True, - ) - magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2)) - if keyshift != 0: - size = self.n_fft // 2 + 1 - resize = magnitude.size(1) - if resize < size: - magnitude = F.pad(magnitude, (0, 0, 0, size - resize)) - magnitude = magnitude[:, :size, :] * self.win_length / win_length_new - mel_output = torch.matmul(self.mel_basis, magnitude) - if self.is_half == True: - mel_output = mel_output.half() - log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp)) - return log_mel_spec - - -class RMVPE: - def __init__(self, model_path, is_half, device=None): - self.resample_kernel = {} - model = E2E(4, 1, (2, 2)) - ckpt = torch.load(model_path, map_location="cpu") - model.load_state_dict(ckpt) - model.eval() - if is_half == True: - model = model.half() - self.model = model - self.resample_kernel = {} - self.is_half = is_half - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - self.device = device - self.mel_extractor = MelSpectrogram( - is_half, 128, 16000, 1024, 160, None, 30, 8000 - ).to(device) - self.model = self.model.to(device) - cents_mapping = 20 * np.arange(360) + 1997.3794084376191 - self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368 - - def mel2hidden(self, mel): - with torch.no_grad(): - n_frames = mel.shape[-1] - mel = F.pad( - mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect" - ) - hidden = self.model(mel) - return hidden[:, :n_frames] - - def decode(self, hidden, thred=0.03): - cents_pred = self.to_local_average_cents(hidden, thred=thred) - f0 = 10 * (2 ** (cents_pred / 1200)) - f0[f0 == 10] = 0 - # f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred]) - return f0 - - def infer_from_audio(self, audio, thred=0.03): - audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0) - # torch.cuda.synchronize() - # t0=ttime() - mel = self.mel_extractor(audio, center=True) - # torch.cuda.synchronize() - # t1=ttime() - hidden = self.mel2hidden(mel) - # torch.cuda.synchronize() - # t2=ttime() - hidden = hidden.squeeze(0).cpu().numpy() - if self.is_half == True: - hidden = hidden.astype("float32") - f0 = self.decode(hidden, thred=thred) - # torch.cuda.synchronize() - # t3=ttime() - # print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0)) - return f0 - - def to_local_average_cents(self, salience, thred=0.05): - # t0 = ttime() - center = np.argmax(salience, axis=1) # 帧长#index - salience = np.pad(salience, ((0, 0), (4, 4))) # 帧长,368 - # t1 = ttime() - center += 4 - todo_salience = [] - todo_cents_mapping = [] - starts = 
center - 4 - ends = center + 5 - for idx in range(salience.shape[0]): - todo_salience.append(salience[:, starts[idx] : ends[idx]][idx]) - todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]]) - # t2 = ttime() - todo_salience = np.array(todo_salience) # 帧长,9 - todo_cents_mapping = np.array(todo_cents_mapping) # 帧长,9 - product_sum = np.sum(todo_salience * todo_cents_mapping, 1) - weight_sum = np.sum(todo_salience, 1) # 帧长 - devided = product_sum / weight_sum # 帧长 - # t3 = ttime() - maxx = np.max(salience, axis=1) # 帧长 - devided[maxx <= thred] = 0 - # t4 = ttime() - # print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3)) - return devided - - -# if __name__ == '__main__': -# audio, sampling_rate = sf.read("卢本伟语录~1.wav") -# if len(audio.shape) > 1: -# audio = librosa.to_mono(audio.transpose(1, 0)) -# audio_bak = audio.copy() -# if sampling_rate != 16000: -# audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) -# model_path = "/bili-coeus/jupyter/jupyterhub-liujing04/vits_ch/test-RMVPE/weights/rmvpe_llc_half.pt" -# thred = 0.03 # 0.01 -# device = 'cuda' if torch.cuda.is_available() else 'cpu' -# rmvpe = RMVPE(model_path,is_half=False, device=device) -# t0=ttime() -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# f0 = rmvpe.infer_from_audio(audio, thred=thred) -# t1=ttime() -# print(f0.shape,t1-t0) diff --git a/spaces/merve/data-leak/source/_posts/2022-01-28-dataset-worldviews.md b/spaces/merve/data-leak/source/_posts/2022-01-28-dataset-worldviews.md deleted file mode 100644 index 67698648fc4d268a46f0b7f91c3c954b8508eb92..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/source/_posts/2022-01-28-dataset-worldviews.md +++ /dev/null @@ -1,194 +0,0 @@ ---- -permalink: /dataset-worldviews/ -template: post.html - -title: Datasets Have Worldviews -summary: Every dataset communicates a different perspective. When you shift your perspective, your conclusions can shift, too. -summaryalt: Every dataset communicates a different perspective. When you shift your perspective, your conclusions can shift, too. -shareimg: https://pair.withgoogle.com/explorables/images/dataset-worldviews-shareimg.png -date: 2022-01-28 ---- - - -

    Suppose you have a dataset of shapes. They can either be shaded or unshaded. They look something like this:

    - -
    - -

    You built a supervised machine learning classifier that will automatically classify each shape as shaded or unshaded. You call it the "Is-Shaded Classifier".

    - -

    Click "Run Classifier" to see how your model performs.

    -

    -
    -
    -
    - -

    It’s not perfect— some of the shapes are definitely misclassified. You want to improve your model!

    - -

    To do so, you want to know more about the kinds of mistakes your model is making.

    - -

    Thinking About Bias

    - -

    In training, you only gave your model the raw image of each shape and one ground truth label: shaded and unshaded. But maybe something about your model—the distribution of the training data you used, the architecture you chose, or how you set your hyperparameters—resulted in your model performing better on some shapes than others.

    - -

    In fact, you’ve seen a lot of papers and articles citing issues of biased model performance between circles, triangles, and rectangles in shape data. One paper finds that shape detection algorithms tend to do worse on triangles; another article says color accuracy is an issue with circles. So you wonder: are there biases in your model’s misclassifications?

    - -
    Three abstract drawings of papers or articles with headlines 'Shape detection: biased against triangles?', 'Geometry experts call for more accurate rectangle data, cite fairness concerns', and 'Increasing color accuracy in circles'
    - -

    You want to make sure that your model is performing equally well across circles, triangles, and rectangles, so you decide to do a fairness analysis.
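    In practice, a fairness analysis here can be as simple as computing accuracy separately for each shape category and comparing the numbers. Below is a minimal sketch of that disaggregated check; the records, field names, and values are hypothetical, and note that it presupposes a shape label for every example, which is exactly what is missing next.

```python
from collections import defaultdict

def accuracy_by_group(records):
    """Compute is-shaded accuracy separately for each shape category."""
    correct = defaultdict(int)
    total = defaultdict(int)
    for r in records:
        total[r["shape"]] += 1
        correct[r["shape"]] += int(r["pred"] == r["label"])
    return {shape: correct[shape] / total[shape] for shape in total}

# Hypothetical records: each one needs a per-shape label for this to work.
records = [
    {"shape": "circle", "label": "shaded", "pred": "shaded"},
    {"shape": "triangle", "label": "unshaded", "pred": "shaded"},
    {"shape": "rectangle", "label": "shaded", "pred": "shaded"},
]
print(accuracy_by_group(records))  # e.g. {'circle': 1.0, 'triangle': 0.0, 'rectangle': 1.0}
```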

    - -

    There’s just one issue: you don’t have labels for which of your shapes are circles, triangles, or rectangles.

    - -

    So, you decide to send your data to data labelers.

    - -
    Different shapes with an arrow pointing to a group of abstract people.
    - -

    You receive feedback from your data labeling team that they’re not sure what to do with the shapes that aren’t exactly circles, triangles, or rectangles.

    - -
    An image of a computer interface and the instructions 'Please select the name of the shape below'. There is a lumpy, blob-like shape with three checkboxes that say 'circle', 'triangle', and 'rectangle'. There is a text box with a question mark next to the interface.
    - -

    For the shapes that are unclear, you can have them use their best guess or simply label them as “other”. Then, you can finally do some fairness analysis!

    - -

    Below is the interface they see:

    - -
    - -

    These shapes should be labeled...

    -
    - -
    - -
    - -

    If you go back and change the labelers' instructions, which shapes do you perform worst on? Where do you find bias?

    - -

    You notice that your results hinge on how you choose to classify the shapes in your data.

    - -

    Because ultimately, this isn’t a world of only circles, triangles, and rectangles!

    - -

    Thinking About Classification

    - -

    What could we find out about our classifier's performance if we used different categories altogether?

    - -

    All shapes are basically...

    -

    Everything else should be labeled...

    - -

    -

    -

    -

    - -

    With each of the different categories, which shapes do you perform worst on? Where do you find bias?

    - -

    Each way of categorizing your shapes takes a different stance about what’s important. Each one makes some features more important than others, makes some distinctions visible and others invisible, and makes some things easy to classify while others become outliers.

    - -

    And each one tells you something different about what kind of bias your classifier has!

    - -

    Grouping and Regrouping

    - -

    Here's another way to look at the same results. We can draw all the shapes that were correctly classified above the dashed line, and all the incorrectly classified shapes below it.

    - -
    - -

    We’re still looking at the same model making the same classifications on the same shapes, so the same shapes stay above and below the line. But each way of grouping the results distributes the errors differently, and each way tells you something different.
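    As a toy sketch of that regrouping, with made-up per-shape results: the predictions stay fixed, but which group looks worst shifts depending on the grouping function you apply.

```python
# Hypothetical fixed results: (description of shape, was it classified correctly?)
results = [
    ("small pointy", True), ("small round", False),
    ("big pointy", False), ("big round", True),
    ("small pointy", False), ("big round", True),
]

def error_rate_by(group_fn, results):
    """Group the same fixed results and report an error rate per group."""
    groups = {}
    for desc, correct in results:
        groups.setdefault(group_fn(desc), []).append(correct)
    return {g: 1 - sum(v) / len(v) for g, v in groups.items()}

# Same results, two different ways of grouping them.
print(error_rate_by(lambda d: "pointy" if "pointy" in d else "round", results))
print(error_rate_by(lambda d: "small" if "small" in d else "big", results))
```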

    - -

    Labels Tell Stories

    - -

    The decisions you make about classification, however small…

    - -

    All shapes are basically...

    - -

    …begin to shape others’ decisions…

    - -
    - -

    …they shape the analysis you can do…

    - -
    - -

    …and they shape the kinds of conversations that happen.

    - -

    - -

    It’s natural to want to find a way out of this problem by gathering more features or collecting more data. If we just have enough detail on enough data, surely we can avoid making these kinds of decisions, right?

    - -

    Unfortunately, that isn’t the case. Describing the world around us in any way—whether we’re telling a friend a story or telling a computer about shapes—requires us to choose what information is important to convey and what tools we want to use to convey it.

    - -

    Whether we think about it or not, we’re always making choices about classification. -

    - -

    All people are basically... men or women

    -

    All food is basically... sweet or savory

    -

    All content is basically... kid-friendly or adult

    -

    All speech is basically... hate speech or acceptable speech

    - -

    All results are basically... significant or insignificant

    - -

    And as we saw with shapes, all of these choices make some features more important than others, make some distinctions visible and other distinctions invisible, and make some things easy to classify while others become outliers.

    - -

    In Practice

    - -

    Let’s take a closer look at how this plays out in real machine learning applications. One straightforward example is in supervised object detection tasks.

    - - -

    For example, let’s imagine we want to train an object detection model on a dataset including this image:

    - -

    Image of the Seattle skyline
    Source: Wikimedia Commons

    - -

    We could give it the following ground truth bounding boxes:

    - -

    Image of the Seattle skyline with boxes around several items in the picture with labels like 'building' and 'tree'.

    - -

    This looks objective, right? After all, a building is a building, a bush is a bush, and a mountain is a mountain!

    -

    But even labeling the same regions in the same image, you can communicate a very different perspective:

    - -

    Image of the Seattle skyline with boxes around several items in the picture, with labels like 'plant, non medicinal' and 'structure, nonreligious'.
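    Concretely, the two labelings above could be stored as annotation records that share exactly the same box geometry and differ only in their category vocabulary. A hypothetical, simplified sketch (the file name, coordinates, and category strings are made up):

```python
# Hypothetical annotations for the same region of the same image.
# The geometry is identical; only the category scheme changes,
# and with it, what the dataset treats as worth distinguishing.
bbox = [40, 210, 180, 95]  # made-up [x, y, width, height] in pixels

annotation_v1 = {"image": "seattle_skyline.jpg", "bbox": bbox, "category": "building"}
annotation_v2 = {"image": "seattle_skyline.jpg", "bbox": bbox, "category": "structure, nonreligious"}

print(annotation_v1["bbox"] == annotation_v2["bbox"])  # True: same pixels, different worldview
```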

    - -

    Or consider the image below, with several sets of “ground truth” labels. Looking at each of these labels, consider:

    - -

    What features matter? What gets labeled? Whose worldview comes through? What might you learn from this set of labels that you wouldn't learn from another?

    - -
    Source: Wikimedia Commons
    - -

    There is no “view from nowhere”, no universal way to organize every object, or word, or image. Datasets are always products of a particular time, place, and set of conditions; they are socially situated artifacts. They have histories; they have politics. And ignoring this fact has very real consequences.

    - -

    So what do we do with this information?

    - -

    A great place to start is to reflect on your own context and get curious about your data.

    - -

    If it’s hard to see a dataset’s values—if it feels “objective”, “universal”, or “neutral”—it may simply be reflecting a worldview you’re accustomed to. So, understanding the limitations of your own worldview can tell you about the limitations of “objective” data. What assumptions do you make about the world? What feels like common sense? What feels foreign?

    - -

    And do some sleuthing about your data! Who collected this data? Why was it collected? Who paid for it? Where did the “ground truth” come from?

    - -

    You might even find yourself questioning what kinds of assumptions underpin machine learning dataset development or even thinking more deeply about classification as a whole.

    - -

    If you find yourself with lots of questions, you're already off to a good start.

    - -

    -

    - -

    Credits

    - -

    Dylan Baker // January 2022

    -

    Thanks to Adam Pearce, Alex Hanna, Emily Denton, Fernanda Viégas, Kevin Robinson, Nithum Thain, Razvan Amironesei, and Vinodkumar Prabhakaran for their help with this piece.

    -

    - - - - - -

    More Explorables

    -

    -

    - - - - - - - - - - - - - diff --git a/spaces/merve/measuring-fairness/source/anonymization/make-estimates.js b/spaces/merve/measuring-fairness/source/anonymization/make-estimates.js deleted file mode 100644 index 46ed3feaf1acaccf35153c3ebaf5b60094b21daf..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/anonymization/make-estimates.js +++ /dev/null @@ -1,227 +0,0 @@ -window.makeEstimates = function(){ - var estimateScale = d3.scaleLinear() - .domain([.5 - .15, .5 + .15]).range([0, c.width]) - .interpolate(d3.interpolateRound) - - var jitterHeight = 90 - var rs = 4 // rect size - - var estimates = students[0].coinVals.map(d => ({val: .5, pctHead: .25, x: c.width/2, y: c.height - jitterHeight/2})) - var simulation = d3.forceSimulation(estimates) - .force('collide', d3.forceCollide(rs).strength(.1)) - .stop() - - function updateEstimates(){ - var selectedStudents = students.all.slice(0, sliders.population) - - selectedStudents[0].coinVals.map((_, i) => { - estimates[i].pctHead = d3.mean(selectedStudents, d => (d.coinVals[i] < sliders.headsProb) || d.plagerized) - - estimates[i].val = (1 - estimates[i].pctHead)/(1 - sliders.headsProb) - }) - updateSimulation(60) - } - updateEstimates() - - function updateSimulation(ticks=80, yStrength=.005){ - var variance = d3.variance(estimates, d => d.val) - var xStength = variance < .0005 ? .3 : .1 - - estimates.forEach(d => d.targetX = estimateScale(d.val)) - - simulation - .force('x', d3.forceX(d => d.targetX).strength(xStength)) - .force('y', d3.forceY(c.height - jitterHeight/2).strength(yStrength)) - .alpha(1) - // .alphaDecay(1 - Math.pow(0.001, 1/ticks)) - - for (var i = 0; i < ticks; ++i) simulation.tick() - - estimates.forEach(d => { - d.x = Math.round(d.x) - d.y = Math.round(d.y) - }) - } - updateSimulation(80, 1) - updateSimulation(80, .005) - - - // Set up DOM - var histogramSel = c.svg.append('g').translate([0, -25]) - var axisSel = histogramSel.append('g.axis.state.init-hidden') - var histogramAxis = axisSel.append('g') - - var numTicks = 6 - var xAxis = d3.axisTop(estimateScale).ticks(numTicks).tickFormat(d3.format('.0%')).tickSize(100) - - histogramAxis.call(xAxis).translate([.5, c.height + 5]) - middleTick = histogramAxis.selectAll('g').filter((d, i) => i === 3) - middleTick.select('text').classed('bold', 1) - middleTick.select('line').st({stroke: '#000'}) - - histogramAxis.append('text.bold') - .text('actual non-plagiarism rate') - .translate([c.width/2, 11]) - .st({fontSize: '10px'}) - - var containerSel = histogramSel.append('g#histogram').translate([0.5, .5]) - - - // Selection overlay to highlight individual estimates. 
- var selectSize = rs*2 + 2 - var selectColor = '#007276' - var rectFill = '#007276' - - var activeSel = histogramSel.append('g.active.init-hidden.axis') - .st({pointerEvents: 'none'}) - - activeSel.append('rect') - .at({width: selectSize, height: selectSize, stroke: selectColor, fill: 'none', strokeWidth: 3}) - .translate([-selectSize/2, -selectSize/2]) - - var activeTextHighlight = activeSel.append('rect') - .at({x: -32, width: 32*2, height: 18, y: -25, fill: 'rgba(255,255,255,.6)', rx: 10, ry: 10, xfill: 'red'}) - - var activeTextSel = activeSel.append('text.est-text.bold') - .text('34%') - .at({textAnchor: 'middle', textAnchor: 'middle', y: '-1em'}) - .st({fill: selectColor}) - - var activePathSel = activeSel.append('path') - .st({stroke: selectColor, strokeWidth: 3}) - - - // Update highlight DOM with current highlight - var curDrawData = {pctHead: .25, val: .5, x: c.width/2, y: c.height - jitterHeight/2} - function setActive(active, dur=0){ - if (active !== estimates.active){ - estimates.forEach(d => { - d.active = d == active - d.fy = d.active ? d.y : null - }) - estimates.active = active - } - - students.updateHeadsPos() - - - sel.flipCircle - .transition().duration(0).delay(d => d.i*5*(dur > 0 ? 1 : 0)) - .at({transform: d => slides && slides.curSlide && slides.curSlide.showFlipCircle && d.coinVals[active.index] < sliders.headsProb ? - 'scale(1)' : 'scale(.1)'}) - - - flipCoinTimer.stop() - if (dur){ - var objI = d3.interpolateObject(curDrawData, active) - - flipCoinTimer = d3.timer(ms => { - var t = d3.easeCubicInOut(d3.clamp(0, ms/dur, 1)) - drawData(objI(t)) - if (t == 1) flipCoinTimer.stop() - }) - } else{ - drawData(active) - } - - function drawData({pctHead, val, x, y}){ - activeSel.translate([x + rs/2, y + rs/2]) - activeTextSel.text('est. ' + d3.format('.1%')(val)) - activePathSel.at({d: `M ${selectSize/2*Math.sign(c.width/2 - x)} -1 H ${c.width/2 - x}`}) - - var error = Math.abs(val - .5) - var fmt = d3.format(".1%") - var pop = sliders.population - d3.select('.rand-text') - // .html(`${fmt(1 - pctHead)} of students said they had never plagerized. Since about half the students flipped heads and automatically reported plagerizism, we double that to estimate ${fmt(val)} of students haven't plagerized—${error > .1 ? '' : error > .07 ? 'a little ' : 'not '}far from the actual rate of ${fmt(.5)}`) - // .html(`${Math.round((1 - pctHead)*pop)} of ${pop} students said they had never plagiarized. Since about half the students flipped heads and automatically reported plagiarism, we double that rate to estimate ${fmt(val)} of students haven't plagiarized—${error > .4 ? '' : error > .07 ? 'a little ' : 'not '}far from the actual rate of ${fmt(.5)}`) - .html(`Here, ${fmt(1 - pctHead)} students said they had never plagiarized. Doubling that, we estimate ${fmt(val)} of students haven't plagiarized—${error > .1 ? 'quite ' : error > .07 ? 
'a little ' : 'not '}far from the actual rate of ${fmt(.5)}`) - - curDrawData = {pctHead, val, x, y} - } - } - window.flipCoinTimer = d3.timer(d => d) - - - - var estimateSel = containerSel.appendMany('rect.estimate', estimates) - .at({width: rs, height: rs, stroke: '#fff', fill: rectFill, strokeWidth: .5}) - .st({fill: rectFill}) - .translate([rs/2, rs/2]) - .on('mouseover', (d, i) => { - if (window.slides.curSlide.showHistogram) { - setActive(d) - } - }) - - function setSelectorOpacity(textOpacity, strokeOpacity) { - activeTextSel.st({opacity: textOpacity}) - activeSel.st({opacity: strokeOpacity}) - activePathSel.st({opacity: strokeOpacity}) - } - - function render(transition=false){ - estimateSel.translate(d => [d.x, d.y]) - setActive(estimates.active) - - if (transition){ - if (window.flipAllCoinsTimer) window.flipAllCoinsTimer.stop() - window.flipAllCoinsTimer = d3.timer(ms => { - var t = d3.easeExpIn(d3.clamp(0, ms/5000, 1), 20) - if (flipAllCoinsTimer.forceEnd) t = 1 - - if (t > .028) { - setSelectorOpacity(textOpacity=0, strokeOpacity=0.7) - } - - var index = Math.floor((estimates.length - 2)*t) + 1 - estimateSel.classed('active', (d, i) => i <= index) - - setActive(estimates[index]) - // flipCoinsSel.text('Flip coins ' + d3.format('03')(index < 100 ? index : index + 1) + ' times') - flipCoinsSel.text('Flip coins 200 times') - - if (t == 1) { - flipAllCoinsTimer.stop() - setSelectorOpacity(textOpacity=1, strokeOpacity=1) - } - }) - } else { - setSelectorOpacity(textOpacity=1, strokeOpacity=1) - flipCoinsSel - } - } - window.flipAllCoinsTimer = d3.timer(d => d) - - - var flipCoinsSel = d3.select('.flip-coins').on('click', () => { - students.all.forEach(student => { - student.coinVals = student.coinVals.map(j => Math.random()) - }) - - updateEstimates() - render(true) - }) - - d3.select('.flip-coins-once').on('click', flipCoin) - function flipCoin(){ - active = estimates[0] - - students.all.forEach(student => { - student.coinVals = student.coinVals.map(j => Math.random()) - }) - - active.fy = active.y = c.height - jitterHeight/2 - updateEstimates() - - estimateSel.translate(d => [d.x, d.y]) - estimates.active = null - setActive(active, 1000) - } - - Object.assign(estimates, {updateEstimates, setActive, render, flipCoin, axisSel, containerSel, estimateSel, activeSel}) - - return estimates -} - -if (window.init) window.init() \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/source/private-and-fair/accuracy-v-privacy-class.js b/spaces/merve/measuring-fairness/source/private-and-fair/accuracy-v-privacy-class.js deleted file mode 100644 index 39daddb629006c967bfa8c3a6c1d43fc9887bc1b..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/source/private-and-fair/accuracy-v-privacy-class.js +++ /dev/null @@ -1,285 +0,0 @@ -var state = { - dataset_size: 15000, - threshold: .8, - label: 8 -} - -var sel = d3.select('.accuracy-v-privacy-class').html('') - .at({role: 'graphics-document', 'aria-label': `Line chart showing that high accuracy models can still perform poorly on some digit classes.`}) - -async function loadData(){ - var rawData = await util.getFile(`cns-cache/grid_${state.dataset_size}trainpoints_test_labels.csv`) - - rawData.forEach(d => { - delete d[''] - d.i = +d.i - d.label = +d.label - }) - - var aVal2Meta = {} - var metadata = await util.getFile('cns-cache/model_grid_test_accuracy.json') - metadata - .filter(d => d.dataset_size == state.dataset_size) - .forEach(d => aVal2Meta['aVal_' + d.aVal] = d) - - var allCols = 
d3.keys(rawData[0]) - .filter(d => d.includes('aVal')) - .map(key => { - var {epsilon, aVal} = aVal2Meta[key] - return {key, epsilon, aVal} - }) - - var byDigit = d3.nestBy(rawData, d => d.label) - byDigit.forEach(d => { - d.label = +d.key - }) - byDigit.forEach(digitClass => { - digitClass.cols = allCols.map(({key, epsilon}, colIndex) => { - return { - key, - colIndex, - epsilon, - digitClass, - label: digitClass.label, - accuracy: d3.mean(digitClass, d => d[key] > state.threshold) - } - }) - }) - - var data = _.flatten(byDigit.map(d => d.cols)) - .filter(d => util.epsilonExtent[1] <= d.epsilon && d.epsilon <= util.epsilonExtent[0]) - var byLabel = d3.nestBy(data, d => d.label) - byLabel.forEach((d, i) => { - d.label = d.key - }) - - return {data, byLabel} -} - - -async function initChart(){ - var {data, byLabel} = await loadData() - - var c = d3.conventions({ - sel: sel.append('div'), - height: 400, - margin: {bottom: 75, top: 5}, - layers: 'ds', - }) - - c.x = d3.scaleLog().domain(util.epsilonExtent).range(c.x.range()) - c.xAxis = d3.axisBottom(c.x).tickFormat(d => { - var rv = d + '' - if (rv.split('').filter(d => d !=0 && d != '.')[0] == 1) return rv - }) - - c.yAxis.tickFormat(d => d3.format('.0%')(d))//.ticks(8) - d3.drawAxis(c) - util.addAxisLabel(c, 'Higher Privacy →', '') - util.ggPlotBg(c, false) - c.layers[0].append('div') - .st({fontSize: 12, color: '#555', width: 120*2, textAlign: 'center', lineHeight: '1.3em', verticalAlign: 'top'}) - .translate([c.width/2 - 120, c.height + 45]) - .html('in ε') - - var line = d3.line().x(d => c.x(d.epsilon)).y(d => c.y(d.accuracy)) - - var lineSel = c.svg.append('g').appendMany('path.accuracy-line', byLabel) - .at({ - d: line, - fill: 'none', - stroke: '#000', - // opacity: 0, - }) - .on('mousemove', setActiveLabel) - - var circleSel = c.svg.append('g') - .appendMany('g.accuracy-circle', data) - .translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - .on('mousemove', setActiveLabel) - // .call(d3.attachTooltip) - - circleSel.append('circle') - .at({r: 7, stroke: '#fff'}) - - circleSel.append('text') - .text(d => d.label) - .at({textAnchor: 'middle', fontSize: 10, fill: '#fff', dy: '.33em'}) - - setActiveLabel(state) - function setActiveLabel({label}){ - lineSel - .classed('active', 0) - .filter(d => d.label == label) - .classed('active', 1) - .raise() - - circleSel - .classed('active', 0) - .filter(d => d.label == label) - .classed('active', 1) - .raise() - - state.label = label - } - - - async function updateDatasetSize(){ - var newData = await loadData() - data = newData.data - byLabel = newData.byLabel - - lineSel.data(byLabel) - .transition() - .at({d: line}) - - circleSel.data(data) - .transition() - .translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - - c.svg.select('text.annotation').remove() - } - - function updateThreshold(){ - data.forEach(d => { - d.accuracy = d3.mean(d.digitClass, e => e[d.key] > state.threshold) - }) - - lineSel.at({d: line}) - circleSel.translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - - c.svg.select('.y .axis-label').text(`Test Points With More Than ${d3.format('.2%')(state.threshold)} Confidence In Label`) - - c.svg.select('text.annotation').remove() - } - updateThreshold() - - return {c, updateDatasetSize, updateThreshold} -} - - -async function init(){ - sel.append('div.chart-title').text('High accuracy models can still perform poorly on some digit classes') - - var chart = await initChart() - - var buttonRowSel = sel.append('div.button-row') - .st({height: 50}) - - var buttonSel = buttonRowSel.append('div') 
- .st({width: 500}) - .append('span.chart-title').text('Training points') - .parent() - .append('div').st({display: 'inline-block', width: 300, marginLeft: 10}) - .append('div.digit-button-container.dataset_size') - .appendMany('div.button', [2000, 3750, 7500, 15000, 30000, 60000]) - .text(d3.format(',')) - .classed('active', d => d == state.dataset_size) - .on('click', d => { - buttonSel.classed('active', e => e == d) - state.dataset_size = d - chart.updateDatasetSize() - }) - - buttonRowSel.append('div.conf-slider') - .append('span.chart-title').text('Confidence threshold') - .parent() - .append('input.slider-native') - .at({ - type: 'range', - min: .0001, - max: .9999, - step: .0001, - value: state.threshold, - }) - .on('input', function(){ - state.threshold = this.value - chart.updateThreshold() - }) - - - function addSliders(){ - var width = 140 - var height = 30 - var color = '#000' - - var sliders = [ - {key: 'threshold', label: 'Confidence threshold', r: [.0001, .9999]}, - ] - sliders.forEach(d => { - d.value = state[d.key] - d.xScale = d3.scaleLinear().range([0, width]).domain(d.r).clamp(1) - }) - - d3.select('.conf-slider .slider-container').remove() - d3.select('.slider-native').remove() - - var svgSel = d3.select('.conf-slider').parent() - // .st({marginTop: 5, marginBottom: 5}) - .appendMany('div.slider-container', sliders) - .append('svg').at({width, height}) - .append('g').translate([10, 25]) - - var sliderSel = svgSel - .on('click', function(d){ - d.value = d.xScale.invert(d3.mouse(this)[0]) - renderSliders(d) - }) - .classed('slider', true) - .st({cursor: 'pointer'}) - - var textSel = sliderSel.append('text.annotation') - .at({y: -15, fontWeight: 300, textAnchor: 'middle', x: 180/2}) - - sliderSel.append('rect') - .at({width, height, y: -height/2, fill: 'rgba(0,0,0,0)'}) - - sliderSel.append('path').at({ - d: `M 0 -.5 H ${width}`, - stroke: color, - strokeWidth: 1 - }) - - var leftPathSel = sliderSel.append('path').at({ - d: `M 0 -.5 H ${width}`, - stroke: color, - strokeWidth: 3 - }) - - var drag = d3.drag() - .on('drag', function(d){ - var x = d3.mouse(this)[0] - d.value = d.xScale.invert(x) - - renderSliders(d) - }) - - var circleSel = sliderSel.append('circle').call(drag) - .at({r: 7, stroke: '#000'}) - - function renderSliders(d){ - if (d) state[d.key] = d.value - - circleSel.at({cx: d => d.xScale(d.value)}) - leftPathSel.at({d: d => `M 0 -.5 H ${d.xScale(d.value)}`}) - textSel - .at({x: d => d.xScale(d.value)}) - .text(d => d3.format('.2%')(d.value)) - chart.updateThreshold() - } - renderSliders() - } - addSliders() - - - chart.c.svg.append('text.annotation') - .translate([505, 212]) - .tspans(d3.wordwrap(`8s are correctly predicted with high confidence much more rarely than other digits`, 25), 12) - .at({textAnchor: 'end'}) - -} -init() - - - - diff --git a/spaces/mfumanelli/Stable-Diffusion-Loves-Cinema/README.md b/spaces/mfumanelli/Stable-Diffusion-Loves-Cinema/README.md deleted file mode 100644 index 0d7d90335c1760866e9ce592e3d356393feae684..0000000000000000000000000000000000000000 --- a/spaces/mfumanelli/Stable-Diffusion-Loves-Cinema/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stable Diffusion Loves Cinema -emoji: 📈 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mkutarna/audiobook_gen/README.md b/spaces/mkutarna/audiobook_gen/README.md deleted file mode 100644 index 
0a7728be7145c7e39a263f196ce43f1c3e63da67..0000000000000000000000000000000000000000 --- a/spaces/mkutarna/audiobook_gen/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Audiobook Gen -emoji: 📚 -colorFrom: blue -colorTo: indigo -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: true -license: mit ---- - -# Audiobook Gen - -## About -Audiobook Gen converts text to audiobook format. It allows you to choose which voice you want to listen to. - -- Do you want to listen to a book that isn't available on Audible? -- Have you tried an audiobook from LibriVox and found the audio quality lacking? -- Don't have time to sit and read a document, but would prefer to listen to it? - -You can input various text formats (`txt`, `pdf`, `epub` - more options in development) and output a `zip` archive of audio files (`wav`). This is an open-source tool based on the [Silero text-to-speech toolkit](https://github.com/snakers4/silero-models) and uses Streamlit to deliver the application. - -# Demo - -## HuggingFace Space -A demonstration of this tool is hosted at HuggingFace Spaces - see [Audiobook_Gen](https://huggingface.co/spaces/mkutarna/audiobook_gen). - -Screenshot - -# Future - -Here is a list features in development and planned for the future: -- `html` file import -- `mobi`, `azw` ebook input -- optional audio output formats (for better compression) -- improved audio file output handling -- Docker image for local use \ No newline at end of file diff --git a/spaces/ml6team/toxic-comment-detection-german/README.md b/spaces/ml6team/toxic-comment-detection-german/README.md deleted file mode 100644 index 3b7806e2679af1d348cb8f75672d7c699b005c42..0000000000000000000000000000000000000000 --- a/spaces/ml6team/toxic-comment-detection-german/README.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: Toxic Comment Detection German -emoji: 🤬 -colorFrom: red -colorTo: gray -sdk: streamlit -sdk_version: 1.2.0 -python_version: 3.10 -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/mshkdm/VToonify/vtoonify/model/raft/train.py b/spaces/mshkdm/VToonify/vtoonify/model/raft/train.py deleted file mode 100644 index 307573097f13ee30c67bbe11658f457fdf1ead3c..0000000000000000000000000000000000000000 --- a/spaces/mshkdm/VToonify/vtoonify/model/raft/train.py +++ /dev/null @@ -1,247 +0,0 @@ -from __future__ import print_function, division -import sys -sys.path.append('core') - -import argparse -import os -import cv2 -import time -import numpy as np -import matplotlib.pyplot as plt - -import torch -import torch.nn as nn -import torch.optim as optim -import torch.nn.functional as F - -from torch.utils.data import DataLoader -from raft import RAFT -import evaluate -import datasets - -from torch.utils.tensorboard import SummaryWriter - -try: - from torch.cuda.amp import GradScaler -except: - # dummy GradScaler for PyTorch < 1.6 - class GradScaler: - def __init__(self): - pass - def scale(self, loss): - return loss - def unscale_(self, optimizer): - pass - def step(self, optimizer): - optimizer.step() - def update(self): - pass - - -# exclude extremly large displacements -MAX_FLOW = 400 -SUM_FREQ = 100 -VAL_FREQ = 5000 - - -def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW): - """ Loss function defined over sequence of flow predictions """ - - n_predictions = len(flow_preds) - flow_loss = 0.0 - - # exlude invalid pixels and extremely large diplacements - mag = torch.sum(flow_gt**2, dim=1).sqrt() - valid = (valid >= 0.5) & (mag < max_flow) - - for i in range(n_predictions): - i_weight = gamma**(n_predictions - i - 1) - i_loss = (flow_preds[i] - flow_gt).abs() - flow_loss += i_weight * (valid[:, None] * i_loss).mean() - - epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt() - epe = epe.view(-1)[valid.view(-1)] - - metrics = { - 'epe': epe.mean().item(), - '1px': (epe < 1).float().mean().item(), - '3px': (epe < 3).float().mean().item(), - '5px': (epe < 5).float().mean().item(), - } - - return flow_loss, metrics - - -def count_parameters(model): - return sum(p.numel() for p in model.parameters() if p.requires_grad) - - -def fetch_optimizer(args, model): - """ Create the optimizer and learning rate scheduler """ - optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon) - - scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100, - pct_start=0.05, cycle_momentum=False, anneal_strategy='linear') - - return optimizer, scheduler - - -class Logger: - def __init__(self, model, scheduler): - self.model = model - self.scheduler = scheduler - self.total_steps = 0 - self.running_loss = {} - self.writer = None - - def _print_training_status(self): - metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())] - training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0]) - metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data) - - # print the training status - print(training_str + metrics_str) - - if self.writer is None: - self.writer = SummaryWriter() - - for k in self.running_loss: - self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps) - self.running_loss[k] = 0.0 - - def push(self, metrics): - self.total_steps += 1 - - for key in metrics: - if key not in self.running_loss: - self.running_loss[key] = 0.0 - - self.running_loss[key] += metrics[key] - - if self.total_steps % SUM_FREQ == SUM_FREQ-1: - self._print_training_status() - self.running_loss = {} - - def write_dict(self, 
results): - if self.writer is None: - self.writer = SummaryWriter() - - for key in results: - self.writer.add_scalar(key, results[key], self.total_steps) - - def close(self): - self.writer.close() - - -def train(args): - - model = nn.DataParallel(RAFT(args), device_ids=args.gpus) - print("Parameter Count: %d" % count_parameters(model)) - - if args.restore_ckpt is not None: - model.load_state_dict(torch.load(args.restore_ckpt), strict=False) - - model.cuda() - model.train() - - if args.stage != 'chairs': - model.module.freeze_bn() - - train_loader = datasets.fetch_dataloader(args) - optimizer, scheduler = fetch_optimizer(args, model) - - total_steps = 0 - scaler = GradScaler(enabled=args.mixed_precision) - logger = Logger(model, scheduler) - - VAL_FREQ = 5000 - add_noise = True - - should_keep_training = True - while should_keep_training: - - for i_batch, data_blob in enumerate(train_loader): - optimizer.zero_grad() - image1, image2, flow, valid = [x.cuda() for x in data_blob] - - if args.add_noise: - stdv = np.random.uniform(0.0, 5.0) - image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0) - image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0) - - flow_predictions = model(image1, image2, iters=args.iters) - - loss, metrics = sequence_loss(flow_predictions, flow, valid, args.gamma) - scaler.scale(loss).backward() - scaler.unscale_(optimizer) - torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) - - scaler.step(optimizer) - scheduler.step() - scaler.update() - - logger.push(metrics) - - if total_steps % VAL_FREQ == VAL_FREQ - 1: - PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name) - torch.save(model.state_dict(), PATH) - - results = {} - for val_dataset in args.validation: - if val_dataset == 'chairs': - results.update(evaluate.validate_chairs(model.module)) - elif val_dataset == 'sintel': - results.update(evaluate.validate_sintel(model.module)) - elif val_dataset == 'kitti': - results.update(evaluate.validate_kitti(model.module)) - - logger.write_dict(results) - - model.train() - if args.stage != 'chairs': - model.module.freeze_bn() - - total_steps += 1 - - if total_steps > args.num_steps: - should_keep_training = False - break - - logger.close() - PATH = 'checkpoints/%s.pth' % args.name - torch.save(model.state_dict(), PATH) - - return PATH - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--name', default='raft', help="name your experiment") - parser.add_argument('--stage', help="determines which dataset to use for training") - parser.add_argument('--restore_ckpt', help="restore checkpoint") - parser.add_argument('--small', action='store_true', help='use small model') - parser.add_argument('--validation', type=str, nargs='+') - - parser.add_argument('--lr', type=float, default=0.00002) - parser.add_argument('--num_steps', type=int, default=100000) - parser.add_argument('--batch_size', type=int, default=6) - parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512]) - parser.add_argument('--gpus', type=int, nargs='+', default=[0,1]) - parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision') - - parser.add_argument('--iters', type=int, default=12) - parser.add_argument('--wdecay', type=float, default=.00005) - parser.add_argument('--epsilon', type=float, default=1e-8) - parser.add_argument('--clip', type=float, default=1.0) - parser.add_argument('--dropout', type=float, default=0.0) - parser.add_argument('--gamma', type=float, 
default=0.8, help='exponential weighting') - parser.add_argument('--add_noise', action='store_true') - args = parser.parse_args() - - torch.manual_seed(1234) - np.random.seed(1234) - - if not os.path.isdir('checkpoints'): - os.mkdir('checkpoints') - - train(args) \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/megatron_11b/README.md b/spaces/mshukor/UnIVAL/fairseq/examples/megatron_11b/README.md deleted file mode 100644 index 945c96c91e2e2d93466abc28d90bc25a1e7dd471..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/megatron_11b/README.md +++ /dev/null @@ -1,161 +0,0 @@ -# Megatron-11b - -Megatron-11b is a unidirectional language model with `11B` parameters based on [Megatron-LM](https://arxiv.org/pdf/1909.08053.pdf). Following the original Megatron work, we trained the model using intra-layer model parallelism with each layer's parameters split across 8 GPUs. - -Megatron-11b is trained on the same data and uses the same byte-pair encoding (BPE) as [RoBERTa](https://arxiv.org/pdf/1907.11692.pdf). - -## Pre-trained models - -Model | Description | # params | # filesize | Download ----|---|---|---|--- -`megatron_11b` | megatron_11b unidirectional language model | 11B | 19Gb | [megatron_11b.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/model_parallel/megatron_11b.tar.gz) - -#### Architecture: - -Param | Value ----|--- -embed_dim | 3072 -ffn_dim | 3072 * 6 -layers | 72 -attention heads | 32 - -#### Training details: - -Param | value ----|--- -bsz | 512 -num_updates | 300,000 -peak_lr | 1.5e-04 -lr scheduler | inverse_sqrt -clip norm | 0.0 - - -## Example training command (model parallel) - -Megatron-11b contains too many parameters to train on a single GPU. Following -the original Megatron work, we adopt an intra-layer model parallel training -approach in which each layer's parameters are split across multiple GPUs and -activations and gradients are communicated during the forward/backward pass, -respectively. We similarly split the loss computation using the -`vocab_parallel_cross_entropy` criterion. - -The following training command illustrates how to do model parallel training in -fairseq. We assume that each machine (node) has 8 GPUs among which to split the -model parameters (`--model-parallel-size 8`). If you have access to multiple -nodes, you may combine this with data parallel training by increasing -`--distributed-world-size`. - -To train Megatron-11b on a single node: - - -```bash -fairseq-train \ - --distributed-world-size 8 \ - --memory-efficient-fp16 \ - --num-workers 2 \ - --model-parallel-size 8 \ - --criterion vocab_parallel_cross_entropy \ - --task language_modeling \ - --sample-break-mode none \ - --tokens-per-sample 1024 \ - --arch transformer_lm_megatron_11b \ - --share-decoder-input-output-embed \ - --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-08 --clip-norm 0.0 \ - --lr-scheduler inverse_sqrt --lr 0.00015 \ - --warmup-updates 3000 --weight-decay 0.01 \ - --dropout 0.1 --attention-dropout 0.1 \ - --batch-size 2 \ - --max-update 300000; -``` - -Note: Above was tested on `DGX-1` box, with `8xV100-32Gb` GPUs. - -## Results - -**[Wikitext103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)** - -Model | Valid perplexity | Test perplexity ----|---|--- -`megatron_11b` | 10.64 | 10.54 - - -## Evaluating `megatron_11b` on Wikitext-103 - -#### 1. 
Downloading Megatron-11b -```bash -# WARNING: this file is 19GB -wget https://dl.fbaipublicfiles.com/fairseq/models/model_parallel/megatron_11b.tar.gz -tar -xzvf megatron_11b.tar.gz -``` - -#### 2. Download Wikitext-103 -```bash -wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip -unzip wikitext-103-raw-v1.zip -``` - -#### 3. Detokenize test tokens -Megatron-11b uses a byte-level BPE that expects raw (untokenized) input. Since -the wikitext-103 dataset comes tokenized, we apply a simple detokenization -process to restore the untokenized test set: - -```bash -python -m examples.megatron_11b.detok wikitext-103-raw/wiki.test.raw > wikitext-103-raw/wiki.test.detok -``` - -#### 4. BPE encoding -```bash -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' - -python -m examples.roberta.multiprocessing_bpe_encoder \ - --encoder-json encoder.json \ - --vocab-bpe vocab.bpe \ - --inputs "wikitext-103-raw/wiki.test.detok" \ - --outputs "wikitext-103-raw/wiki.test.bpe" \ - --workers 60; -``` - -#### 5. Fairseq binarize -```bash -fairseq-preprocess \ - --only-source \ - --testpref wikitext-103-raw/wiki.test.bpe \ - --srcdict megatron_11b/dict.txt \ - --destdir wikitext103-bin; -``` - -#### 6. Evaluating perplexity. -We can now evaluate perplexity on the test set. Note that because we've modified -the test set (via detokenization and BPE), the perplexity reported by -`fairseq-eval-lm` needs to be renormalized. - -Compute unnormalized perplexity: - -```bash -DATA_PATH=wikitext103-bin/ -fairseq-eval-lm \ - $DATA_PATH \ - --path megatron_11b/model.pt \ - --task language_modeling \ - --gen-subset test \ - --batch-size 8 \ - --criterion cross_entropy \ - --context-window 992 \ - --distributed-world-size 8 \ - --model-parallel-size 8; -# Expected PPL (unnormalized_ppl): [8.46] -# Note: the eval command needs to run on 8 GPUs for the released model -``` -Renormalizing formula: `2 ^ ( log_2(unnormalized_PPL) * (270847 / 245566))`. -PPL After normalization: `10.54` - -To renormalize the perplexity, we must account for the change in token count -after detokenizing and appling BPE. The formula for this is: -`2 ^ ( log_2(unnormalized_PPL) * (new_token_cnt / orig_token_cnt))` - -For the wikitext-103 test set, the original token count is `245566` and the -token count after detokenization and applying BPE is `270847`. - -The perplexity after renormalization is: -`2 ^ ( log_2(8.46) * (270847 / 245566)) = 10.54` diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/translation_moe/translation_moe_src/__init__.py b/spaces/mshukor/UnIVAL/fairseq/examples/translation_moe/translation_moe_src/__init__.py deleted file mode 100644 index c0abe53e973b4bb31cfb062708965d002c79b6e7..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/translation_moe/translation_moe_src/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . 
import translation_moe # noqa diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/tasks/__init__.py b/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/tasks/__init__.py deleted file mode 100644 index 6d7dd625e09451be671908578f93148f371f53cd..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/tasks/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .unpaired_audio_text import UnpairedAudioText - - -__all__ = [ - "UnpairedAudioText", -] diff --git a/spaces/mshukor/UnIVAL/models/taming/modules/diffusionmodules/model.py b/spaces/mshukor/UnIVAL/models/taming/modules/diffusionmodules/model.py deleted file mode 100644 index d3a5db6aa2ef915e270f1ae135e4a9918fdd884c..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/models/taming/modules/diffusionmodules/model.py +++ /dev/null @@ -1,776 +0,0 @@ -# pytorch_diffusion + derived encoder decoder -import math -import torch -import torch.nn as nn -import numpy as np - - -def get_timestep_embedding(timesteps, embedding_dim): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: - From Fairseq. - Build sinusoidal embeddings. - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". - """ - assert len(timesteps.shape) == 1 - - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) - emb = emb.to(device=timesteps.device) - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0,1,0,0)) - return emb - - -def nonlinearity(x): - # swish - return x*torch.sigmoid(x) - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class Upsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) - - def forward(self, x): - if self.with_conv: - pad = (0,1,0,1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) - return x - - -class ResnetBlock(nn.Module): - def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout, temb_channels=512): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d(in_channels, - out_channels, - 
kernel_size=3, - stride=1, - padding=1) - if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, - out_channels) - self.norm2 = Normalize(out_channels) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) - - if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] - - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h = self.conv2(h) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) - - return x+h - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = q.reshape(b,c,h*w) - q = q.permute(0,2,1) # b,hw,c - k = k.reshape(b,c,h*w) # b,c,hw - w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b,c,h*w) - w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b,c,h,w) - - h_ = self.proj_out(h_) - - return x+h_ - - -class Model(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, use_timestep=True): - super().__init__() - self.ch = ch - self.temb_ch = self.ch*4 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - self.use_timestep = use_timestep - if self.use_timestep: - # timestep embedding - self.temb = nn.Module() - self.temb.dense = nn.ModuleList([ - torch.nn.Linear(self.ch, - self.temb_ch), - torch.nn.Linear(self.temb_ch, - self.temb_ch), - ]) - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(AttnBlock(block_in)) - down = nn.Module() - 
down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = AttnBlock(block_in) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - skip_in = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - if i_block == self.num_res_blocks: - skip_in = ch*in_ch_mult[i_level] - block.append(ResnetBlock(in_channels=block_in+skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(AttnBlock(block_in)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - - def forward(self, x, t=None): - #assert x.shape[2] == x.shape[3] == self.resolution - - if self.use_timestep: - # timestep embedding - assert t is not None - temb = get_timestep_embedding(t, self.ch) - temb = self.temb.dense[0](temb) - temb = nonlinearity(temb) - temb = self.temb.dense[1](temb) - else: - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class Encoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, double_z=True, **ignore_kwargs): - super().__init__() - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - 
block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(AttnBlock(block_in)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = AttnBlock(block_in) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - 2*z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1) - - - def forward(self, x): - #assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution) - - # timestep embedding - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class Decoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, **ignorekwargs): - super().__init__() - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,)+tuple(ch_mult) - block_in = ch*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - print("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = AttnBlock(block_in) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(AttnBlock(block_in)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) 
- curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, z): - #assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep embedding - temb = None - - # z to block_in - h = self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - if self.give_pre_end: - return h - - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class VUNet(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, - in_channels, c_channels, - resolution, z_channels, use_timestep=False, **ignore_kwargs): - super().__init__() - self.ch = ch - self.temb_ch = self.ch*4 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - - self.use_timestep = use_timestep - if self.use_timestep: - # timestep embedding - self.temb = nn.Module() - self.temb.dense = nn.ModuleList([ - torch.nn.Linear(self.ch, - self.temb_ch), - torch.nn.Linear(self.temb_ch, - self.temb_ch), - ]) - - # downsampling - self.conv_in = torch.nn.Conv2d(c_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(AttnBlock(block_in)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - self.z_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=1, - stride=1, - padding=0) - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=2*block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = AttnBlock(block_in) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - skip_in = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - if i_block == self.num_res_blocks: - skip_in = ch*in_ch_mult[i_level] - block.append(ResnetBlock(in_channels=block_in+skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(AttnBlock(block_in)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, 
resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - - def forward(self, x, z): - #assert x.shape[2] == x.shape[3] == self.resolution - - if self.use_timestep: - # timestep embedding - assert t is not None - temb = get_timestep_embedding(t, self.ch) - temb = self.temb.dense[0](temb) - temb = nonlinearity(temb) - temb = self.temb.dense[1](temb) - else: - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - z = self.z_in(z) - h = torch.cat((h,z),dim=1) - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class SimpleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, *args, **kwargs): - super().__init__() - self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock(in_channels=in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=2 * in_channels, - out_channels=4 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - nn.Conv2d(2*in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True)]) - # end - self.norm_out = Normalize(in_channels) - self.conv_out = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - for i, layer in enumerate(self.model): - if i in [1,2,3]: - x = layer(x, None) - else: - x = layer(x) - - h = self.norm_out(x) - h = nonlinearity(h) - x = self.conv_out(h) - return x - - -class UpsampleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, - ch_mult=(2,2), dropout=0.0): - super().__init__() - # upsampling - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - block_in = in_channels - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.res_blocks = nn.ModuleList() - self.upsample_blocks = nn.ModuleList() - for i_level in range(self.num_resolutions): - res_block = [] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - res_block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - self.res_blocks.append(nn.ModuleList(res_block)) - if i_level != self.num_resolutions - 1: - self.upsample_blocks.append(Upsample(block_in, True)) - curr_res = curr_res * 2 - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_channels, - kernel_size=3, - stride=1, - 
padding=1) - - def forward(self, x): - # upsampling - h = x - for k, i_level in enumerate(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.res_blocks[i_level][i_block](h, None) - if i_level != self.num_resolutions - 1: - h = self.upsample_blocks[k](h) - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - diff --git a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/search/search.js b/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/search/search.js deleted file mode 100644 index bcabf7211b8b4910e2b7da259992e23d3fc42780..0000000000000000000000000000000000000000 --- a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/search/search.js +++ /dev/null @@ -1,7 +0,0 @@ -!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).RevealSearch=t()}(this,(function(){"use strict";var e="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{},t=function(e){try{return!!e()}catch(e){return!0}},n=!t((function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]})),r=function(e){return e&&e.Math==Math&&e},o=r("object"==typeof globalThis&&globalThis)||r("object"==typeof window&&window)||r("object"==typeof self&&self)||r("object"==typeof e&&e)||function(){return this}()||Function("return this")(),i=t,c=/#|\.prototype\./,a=function(e,t){var n=l[u(e)];return n==s||n!=f&&("function"==typeof t?i(t):!!t)},u=a.normalize=function(e){return String(e).replace(c,".").toLowerCase()},l=a.data={},f=a.NATIVE="N",s=a.POLYFILL="P",p=a,d=function(e){return"object"==typeof e?null!==e:"function"==typeof e},g=d,h=function(e){if(!g(e))throw TypeError(String(e)+" is not an object");return e},y=d,v=h,x=function(e){if(!y(e)&&null!==e)throw TypeError("Can't set "+String(e)+" as a prototype");return e},b=Object.setPrototypeOf||("__proto__"in{}?function(){var e,t=!1,n={};try{(e=Object.getOwnPropertyDescriptor(Object.prototype,"__proto__").set).call(n,[]),t=n instanceof Array}catch(e){}return function(n,r){return v(n),x(r),t?e.call(n,r):n.__proto__=r,n}}():void 0),m=d,E=b,S={},w=d,O=o.document,R=w(O)&&w(O.createElement),T=function(e){return R?O.createElement(e):{}},_=!n&&!t((function(){return 7!=Object.defineProperty(T("div"),"a",{get:function(){return 7}}).a})),j=d,P=function(e,t){if(!j(e))return e;var n,r;if(t&&"function"==typeof(n=e.toString)&&!j(r=n.call(e)))return r;if("function"==typeof(n=e.valueOf)&&!j(r=n.call(e)))return r;if(!t&&"function"==typeof(n=e.toString)&&!j(r=n.call(e)))return r;throw TypeError("Can't convert object to primitive value")},I=n,C=_,N=h,A=P,k=Object.defineProperty;S.f=I?k:function(e,t,n){if(N(e),t=A(t,!0),N(n),C)try{return k(e,t,n)}catch(e){}if("get"in n||"set"in n)throw TypeError("Accessors not supported");return"value"in n&&(e[t]=n.value),e};var $={},L=function(e){if(null==e)throw TypeError("Can't call method on "+e);return e},M=L,U=function(e){return Object(M(e))},D=U,F={}.hasOwnProperty,z=function(e,t){return F.call(D(e),t)},K={}.toString,B=function(e){return K.call(e).slice(8,-1)},W=B,G="".split,V=t((function(){return!Object("z").propertyIsEnumerable(0)}))?function(e){return"String"==W(e)?G.call(e,""):Object(e)}:Object,Y=L,q=function(e){return V(Y(e))},X=Math.ceil,H=Math.floor,J=function(e){return 
isNaN(e=+e)?0:(e>0?H:X)(e)},Q=J,Z=Math.min,ee=function(e){return e>0?Z(Q(e),9007199254740991):0},te=J,ne=Math.max,re=Math.min,oe=q,ie=ee,ce=function(e,t){var n=te(e);return n<0?ne(n+t,0):re(n,t)},ae=function(e){return function(t,n,r){var o,i=oe(t),c=ie(i.length),a=ce(r,c);if(e&&n!=n){for(;c>a;)if((o=i[a++])!=o)return!0}else for(;c>a;a++)if((e||a in i)&&i[a]===n)return e||a||0;return!e&&-1}},ue={includes:ae(!0),indexOf:ae(!1)},le={},fe=z,se=q,pe=ue.indexOf,de=le,ge=function(e,t){var n,r=se(e),o=0,i=[];for(n in r)!fe(de,n)&&fe(r,n)&&i.push(n);for(;t.length>o;)fe(r,n=t[o++])&&(~pe(i,n)||i.push(n));return i},he=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"].concat("length","prototype");$.f=Object.getOwnPropertyNames||function(e){return ge(e,he)};var ye={exports:{}},ve=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}},xe=S,be=ve,me=n?function(e,t,n){return xe.f(e,t,be(1,n))}:function(e,t,n){return e[t]=n,e},Ee=o,Se=me,we=function(e,t){try{Se(Ee,e,t)}catch(n){Ee[e]=t}return t},Oe=we,Re="__core-js_shared__",Te=o[Re]||Oe(Re,{}),_e=Te;(ye.exports=function(e,t){return _e[e]||(_e[e]=void 0!==t?t:{})})("versions",[]).push({version:"3.12.1",mode:"global",copyright:"© 2021 Denis Pushkarev (zloirock.ru)"});var je,Pe,Ie=0,Ce=Math.random(),Ne=function(e){return"Symbol("+String(void 0===e?"":e)+")_"+(++Ie+Ce).toString(36)},Ae=o,ke=o,$e=function(e){return"function"==typeof e?e:void 0},Le=function(e,t){return arguments.length<2?$e(Ae[e])||$e(ke[e]):Ae[e]&&Ae[e][t]||ke[e]&&ke[e][t]},Me=Le("navigator","userAgent")||"",Ue=o.process,De=Ue&&Ue.versions,Fe=De&&De.v8;Fe?Pe=(je=Fe.split("."))[0]<4?1:je[0]+je[1]:Me&&(!(je=Me.match(/Edge\/(\d+)/))||je[1]>=74)&&(je=Me.match(/Chrome\/(\d+)/))&&(Pe=je[1]);var ze=Pe&&+Pe,Ke=t,Be=!!Object.getOwnPropertySymbols&&!Ke((function(){return!String(Symbol())||!Symbol.sham&&ze&&ze<41})),We=Be&&!Symbol.sham&&"symbol"==typeof Symbol.iterator,Ge=o,Ve=ye.exports,Ye=z,qe=Ne,Xe=Be,He=We,Je=Ve("wks"),Qe=Ge.Symbol,Ze=He?Qe:Qe&&Qe.withoutSetter||qe,et=function(e){return Ye(Je,e)&&(Xe||"string"==typeof Je[e])||(Xe&&Ye(Qe,e)?Je[e]=Qe[e]:Je[e]=Ze("Symbol."+e)),Je[e]},tt=d,nt=B,rt=et("match"),ot=h,it=function(){var e=ot(this),t="";return e.global&&(t+="g"),e.ignoreCase&&(t+="i"),e.multiline&&(t+="m"),e.dotAll&&(t+="s"),e.unicode&&(t+="u"),e.sticky&&(t+="y"),t},ct={},at=t;function ut(e,t){return RegExp(e,t)}ct.UNSUPPORTED_Y=at((function(){var e=ut("a","y");return e.lastIndex=2,null!=e.exec("abcd")})),ct.BROKEN_CARET=at((function(){var e=ut("^r","gy");return e.lastIndex=2,null!=e.exec("str")}));var lt={exports:{}},ft=Te,st=Function.toString;"function"!=typeof ft.inspectSource&&(ft.inspectSource=function(e){return st.call(e)});var pt,dt,gt,ht=ft.inspectSource,yt=ht,vt=o.WeakMap,xt="function"==typeof vt&&/native code/.test(yt(vt)),bt=ye.exports,mt=Ne,Et=bt("keys"),St=xt,wt=d,Ot=me,Rt=z,Tt=Te,_t=function(e){return Et[e]||(Et[e]=mt(e))},jt=le,Pt="Object already initialized",It=o.WeakMap;if(St||Tt.state){var Ct=Tt.state||(Tt.state=new It),Nt=Ct.get,At=Ct.has,kt=Ct.set;pt=function(e,t){if(At.call(Ct,e))throw new TypeError(Pt);return t.facade=e,kt.call(Ct,e,t),t},dt=function(e){return Nt.call(Ct,e)||{}},gt=function(e){return At.call(Ct,e)}}else{var $t=_t("state");jt[$t]=!0,pt=function(e,t){if(Rt(e,$t))throw new TypeError(Pt);return t.facade=e,Ot(e,$t,t),t},dt=function(e){return Rt(e,$t)?e[$t]:{}},gt=function(e){return Rt(e,$t)}}var Lt={set:pt,get:dt,has:gt,enforce:function(e){return 
gt(e)?dt(e):pt(e,{})},getterFor:function(e){return function(t){var n;if(!wt(t)||(n=dt(t)).type!==e)throw TypeError("Incompatible receiver, "+e+" required");return n}}},Mt=o,Ut=me,Dt=z,Ft=we,zt=ht,Kt=Lt.get,Bt=Lt.enforce,Wt=String(String).split("String");(lt.exports=function(e,t,n,r){var o,i=!!r&&!!r.unsafe,c=!!r&&!!r.enumerable,a=!!r&&!!r.noTargetGet;"function"==typeof n&&("string"!=typeof t||Dt(n,"name")||Ut(n,"name",t),(o=Bt(n)).source||(o.source=Wt.join("string"==typeof t?t:""))),e!==Mt?(i?!a&&e[t]&&(c=!0):delete e[t],c?e[t]=n:Ut(e,t,n)):c?e[t]=n:Ft(t,n)})(Function.prototype,"toString",(function(){return"function"==typeof this&&Kt(this).source||zt(this)}));var Gt=Le,Vt=S,Yt=n,qt=et("species"),Xt=n,Ht=o,Jt=p,Qt=function(e,t,n){var r,o;return E&&"function"==typeof(r=t.constructor)&&r!==n&&m(o=r.prototype)&&o!==n.prototype&&E(e,o),e},Zt=S.f,en=$.f,tn=function(e){var t;return tt(e)&&(void 0!==(t=e[rt])?!!t:"RegExp"==nt(e))},nn=it,rn=ct,on=lt.exports,cn=t,an=Lt.enforce,un=function(e){var t=Gt(e),n=Vt.f;Yt&&t&&!t[qt]&&n(t,qt,{configurable:!0,get:function(){return this}})},ln=et("match"),fn=Ht.RegExp,sn=fn.prototype,pn=/a/g,dn=/a/g,gn=new fn(pn)!==pn,hn=rn.UNSUPPORTED_Y;if(Xt&&Jt("RegExp",!gn||hn||cn((function(){return dn[ln]=!1,fn(pn)!=pn||fn(dn)==dn||"/a/i"!=fn(pn,"i")})))){for(var yn=function(e,t){var n,r=this instanceof yn,o=tn(e),i=void 0===t;if(!r&&o&&e.constructor===yn&&i)return e;gn?o&&!i&&(e=e.source):e instanceof yn&&(i&&(t=nn.call(e)),e=e.source),hn&&(n=!!t&&t.indexOf("y")>-1)&&(t=t.replace(/y/g,""));var c=Qt(gn?new fn(e,t):fn(e,t),r?this:sn,yn);hn&&n&&(an(c).sticky=!0);return c},vn=function(e){e in yn||Zt(yn,e,{configurable:!0,get:function(){return fn[e]},set:function(t){fn[e]=t}})},xn=en(fn),bn=0;xn.length>bn;)vn(xn[bn++]);sn.constructor=yn,yn.prototype=sn,on(Ht,"RegExp",yn)}un("RegExp");var mn={},En={},Sn={}.propertyIsEnumerable,wn=Object.getOwnPropertyDescriptor,On=wn&&!Sn.call({1:2},1);En.f=On?function(e){var t=wn(this,e);return!!t&&t.enumerable}:Sn;var Rn=n,Tn=En,_n=ve,jn=q,Pn=P,In=z,Cn=_,Nn=Object.getOwnPropertyDescriptor;mn.f=Rn?Nn:function(e,t){if(e=jn(e),t=Pn(t,!0),Cn)try{return Nn(e,t)}catch(e){}if(In(e,t))return _n(!Tn.f.call(e,t),e[t])};var An={};An.f=Object.getOwnPropertySymbols;var kn=$,$n=An,Ln=h,Mn=Le("Reflect","ownKeys")||function(e){var t=kn.f(Ln(e)),n=$n.f;return n?t.concat(n(e)):t},Un=z,Dn=Mn,Fn=mn,zn=S,Kn=o,Bn=mn.f,Wn=me,Gn=lt.exports,Vn=we,Yn=function(e,t){for(var n=Dn(t),r=zn.f,o=Fn.f,i=0;i0&&(!i.multiline||i.multiline&&"\n"!==e[i.lastIndex-1])&&(u="(?: "+u+")",f=" "+f,l++),n=new RegExp("^(?:"+u+")",a)),rr&&(n=new RegExp("^"+u+"$(?!\\s)",a)),tr&&(t=i.lastIndex),r=Qn.call(c?n:i,f),c?r?(r.input=r.input.slice(l),r[0]=r[0].slice(l),r.index=i.lastIndex,i.lastIndex+=r[0].length):i.lastIndex=0:tr&&r&&(i.lastIndex=i.global?r.index+r[0].length:t),rr&&r&&r.length>1&&Zn.call(r[0],n,(function(){for(o=1;o")})),Sr="$0"==="a".replace(/./,"$0"),wr=vr("replace"),Or=!!/./[wr]&&""===/./[wr]("a","$0"),Rr=!yr((function(){var e=/(?:)/,t=e.exec;e.exec=function(){return t.apply(this,arguments)};var n="ab".split(e);return 2!==n.length||"a"!==n[0]||"b"!==n[1]})),Tr=J,_r=L,jr=function(e){return function(t,n){var r,o,i=String(_r(t)),c=Tr(n),a=i.length;return c<0||c>=a?e?"":void 0:(r=i.charCodeAt(c))<55296||r>56319||c+1===a||(o=i.charCodeAt(c+1))<56320||o>57343?e?i.charAt(c):r:e?i.slice(c,c+2):o-56320+(r-55296<<10)+65536}},Pr={codeAt:jr(!1),charAt:jr(!0)}.charAt,Ir=U,Cr=Math.floor,Nr="".replace,Ar=/\$([$&'`]|\d{1,2}|<[^>]*>)/g,kr=/\$([$&'`]|\d{1,2})/g,$r=B,Lr=or,Mr=function(e,t,n,r){var 
o=vr(e),i=!yr((function(){var t={};return t[o]=function(){return 7},7!=""[e](t)})),c=i&&!yr((function(){var t=!1,n=/a/;return"split"===e&&((n={}).constructor={},n.constructor[br]=function(){return n},n.flags="",n[o]=/./[o]),n.exec=function(){return t=!0,null},n[o](""),!t}));if(!i||!c||"replace"===e&&(!Er||!Sr||Or)||"split"===e&&!Rr){var a=/./[o],u=n(o,""[e],(function(e,t,n,r,o){var c=t.exec;return c===hr||c===mr.exec?i&&!o?{done:!0,value:a.call(t,n,r)}:{done:!0,value:e.call(n,t,r)}:{done:!1}}),{REPLACE_KEEPS_$0:Sr,REGEXP_REPLACE_SUBSTITUTES_UNDEFINED_CAPTURE:Or}),l=u[0],f=u[1];gr(String.prototype,e,l),gr(mr,o,2==t?function(e,t){return f.call(e,this,t)}:function(e){return f.call(e,this)})}r&&xr(mr[o],"sham",!0)},Ur=h,Dr=ee,Fr=J,zr=L,Kr=function(e,t,n){return t+(n?Pr(e,t).length:1)},Br=function(e,t,n,r,o,i){var c=n+e.length,a=r.length,u=kr;return void 0!==o&&(o=Ir(o),u=Ar),Nr.call(i,u,(function(i,u){var l;switch(u.charAt(0)){case"$":return"$";case"&":return e;case"`":return t.slice(0,n);case"'":return t.slice(c);case"<":l=o[u.slice(1,-1)];break;default:var f=+u;if(0===f)return i;if(f>a){var s=Cr(f/10);return 0===s?i:s<=a?void 0===r[s-1]?u.charAt(1):r[s-1]+u.charAt(1):i}l=r[f-1]}return void 0===l?"":l}))},Wr=function(e,t){var n=e.exec;if("function"==typeof n){var r=n.call(e,t);if("object"!=typeof r)throw TypeError("RegExp exec method returned something other than an Object or null");return r}if("RegExp"!==$r(e))throw TypeError("RegExp#exec called on incompatible receiver");return Lr.call(e,t)},Gr=Math.max,Vr=Math.min;Mr("replace",2,(function(e,t,n,r){var o=r.REGEXP_REPLACE_SUBSTITUTES_UNDEFINED_CAPTURE,i=r.REPLACE_KEEPS_$0,c=o?"$":"$0";return[function(n,r){var o=zr(this),i=null==n?void 0:n[e];return void 0!==i?i.call(n,o,r):t.call(String(o),n,r)},function(e,r){if(!o&&i||"string"==typeof r&&-1===r.indexOf(c)){var a=n(t,e,this,r);if(a.done)return a.value}var u=Ur(e),l=String(this),f="function"==typeof r;f||(r=String(r));var s=u.global;if(s){var p=u.unicode;u.lastIndex=0}for(var d=[];;){var g=Wr(u,l);if(null===g)break;if(d.push(g),!s)break;""===String(g[0])&&(u.lastIndex=Kr(l,Dr(u.lastIndex),p))}for(var h,y="",v=0,x=0;x=v&&(y+=l.slice(v,m)+R,v=m+b.length)}return y+l.slice(v)}]}));var Yr={};Yr[et("toStringTag")]="z";var qr="[object z]"===String(Yr),Xr=qr,Hr=B,Jr=et("toStringTag"),Qr="Arguments"==Hr(function(){return arguments}()),Zr=Xr?Hr:function(e){var t,n,r;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(n=function(e,t){try{return e[t]}catch(e){}}(t=Object(e),Jr))?n:Qr?Hr(t):"Object"==(r=Hr(t))&&"function"==typeof t.callee?"Arguments":r},eo=qr?{}.toString:function(){return"[object "+Zr(this)+"]"},to=qr,no=lt.exports,ro=eo;to||no(Object.prototype,"toString",ro,{unsafe:!0}) -/*! - * Handles finding a text string anywhere in the slides and showing the next occurrence to the user - * by navigatating to that slide and highlighting it. 
- * - * @author Jon Snyder , February 2013 - */;return function(){var e,t,n,r,o,i,c;function a(){(t=document.createElement("div")).classList.add("searchbox"),t.style.position="absolute",t.style.top="10px",t.style.right="10px",t.style.zIndex=10,t.innerHTML='\n\t\t',(n=t.querySelector(".searchinput")).style.width="240px",n.style.fontSize="14px",n.style.padding="4px 6px",n.style.color="#000",n.style.background="#fff",n.style.borderRadius="2px",n.style.border="0",n.style.outline="0",n.style.boxShadow="0 2px 18px rgba(0, 0, 0, 0.2)",n.style["-webkit-appearance"]="none",e.getRevealElement().appendChild(t),n.addEventListener("keyup",(function(t){switch(t.keyCode){case 13:t.preventDefault(),function(){if(i){var t=n.value;""===t?(c&&c.remove(),r=null):(c=new f("slidecontent"),r=c.apply(t),o=0)}r&&(r.length&&r.length<=o&&(o=0),r.length>o&&(e.slide(r[o].h,r[o].v),o++))}(),i=!1;break;default:i=!0}}),!1),l()}function u(){t||a(),t.style.display="inline",n.focus(),n.select()}function l(){t||a(),t.style.display="none",c&&c.remove()}function f(t,n){var r=document.getElementById(t)||document.body,o=n||"EM",i=new RegExp("^(?:"+o+"|SCRIPT|FORM)$"),c=["#ff6","#a0ffff","#9f9","#f99","#f6f"],a=[],u=0,l="",f=[];this.setRegex=function(e){e=e.replace(/^[^\w]+|[^\w]+$/g,"").replace(/[^\w'-]+/g,"|"),l=new RegExp("("+e+")","i")},this.getRegex=function(){return l.toString().replace(/^\/\\b\(|\)\\b\/i$/g,"").replace(/\|/g," ")},this.hiliteWords=function(t){if(null!=t&&t&&l&&!i.test(t.nodeName)){if(t.hasChildNodes())for(var n=0;n np.array: - # tokenize text - inputs = self.processor(text=text, return_tensors="pt") - # generate spectrogram using backbone model - spectrogram = self.model.generate_speech(inputs["input_ids"].to(self.device), - self.speaker_embedding.to(self.device)) - # decode spectrogram into waveform using vocoder - with torch.no_grad(): - waveform_array = self.vocoder(spectrogram).detach().cpu().numpy() - return waveform_array - - def create_speaker_embedding(self, waveform: torch.tensor) -> torch.tensor: - with torch.no_grad(): - speaker_embeddings = self.speaker_model.encode_batch(waveform) - speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2) - self.speaker_embedding = speaker_embeddings - speaker_embeddings = speaker_embeddings.squeeze() - return speaker_embeddings diff --git a/spaces/nazianafis/Sentiment-Analysis/README.md b/spaces/nazianafis/Sentiment-Analysis/README.md deleted file mode 100644 index a6231a593269984ab960d8eb04fa325c16277d91..0000000000000000000000000000000000000000 --- a/spaces/nazianafis/Sentiment-Analysis/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Sentiment Analysis -emoji: 🌖 -colorFrom: yellow -colorTo: yellow -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
\ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Birdline Platinum Skin Pack V4.2.2 (for Samplitude.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Birdline Platinum Skin Pack V4.2.2 (for Samplitude.md deleted file mode 100644 index 86b779dbdd940fa0289e38e86156304640bcf990..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Birdline Platinum Skin Pack V4.2.2 (for Samplitude.md +++ /dev/null @@ -1,23 +0,0 @@ - -Here is a possible title and article with html formatting for the keyword "Birdline Platinum Skin Pack V4.2.2 (for Samplitude": - -

    Birdline Platinum Skin Pack V4.2.2: A Must-Have for Samplitude Users

    -

    If you are a fan of Samplitude, the powerful digital audio workstation from MAGIX, you might want to check out the latest version of Birdline Platinum Skin Pack. This is a collection of over 40 high-quality skins that can transform the look and feel of your Samplitude interface. Whether you want a dark, sleek, futuristic, or retro style, you can find it in the Birdline Platinum Skin Pack V4.2.2.

    -

    Some of the features of the Birdline Platinum Skin Pack V4.2.2 include:

    -

    Birdline Platinum Skin Pack V4.2.2 (for Samplitude


    Download File ::: https://urlcod.com/2uIb60



    -
      -
    • Compatibility with Samplitude Pro X5 and Pro X6
    • Support for high-resolution monitors and scaling options
    • Customizable colors, fonts, icons, and knobs
    • Easy installation and activation
    • Free updates and technical support
    -

    The Birdline Platinum Skin Pack V4.2.2 is available for purchase from the official website for $49.95 USD. You can also download a free demo version to try before you buy. If you already own a previous version of the Birdline Platinum Skin Pack, you can upgrade to the latest version for $19.95 USD.

    -

    Don't miss this opportunity to enhance your Samplitude experience with the Birdline Platinum Skin Pack V4.2.2. You will be amazed by how much difference a skin can make!

    -

    What are the benefits of using skins for Samplitude? Well, besides giving your interface a fresh and personalized look, skins can also improve your workflow and productivity. By choosing a skin that suits your preferences and needs, you can reduce eye strain, increase visibility, and access the functions you use most often more easily. You can also switch between different skins depending on the project you are working on or the mood you are in.

    -

    The Birdline Platinum Skin Pack V4.2.2 offers a wide range of skins that cater to different tastes and styles. For example, if you want a skin that is dark and minimalistic, you can try the Black Diamond skin. If you want a skin that is colorful and vibrant, you can try the Rainbow skin. If you want a skin that is classic and elegant, you can try the Vintage skin. And if you want a skin that is futuristic and cool, you can try the Matrix skin. These are just some of the examples of the skins available in the Birdline Platinum Skin Pack V4.2.2.

    -

    How do you install and activate the Birdline Platinum Skin Pack V4.2.2? It's very simple and straightforward. After purchasing the skin pack from the official website, you will receive an email with a download link and a license key. You just need to download the zip file, extract it, and run the installer. Then, you need to enter your license key and select the skins you want to install. After that, you can launch Samplitude and choose your preferred skin from the Options menu.

    -

    7196e7f11a
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Contafiscal 4.0 2010.zip.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Contafiscal 4.0 2010.zip.md deleted file mode 100644 index 6bbac4fb4e83b2a6cbf7b992c0dd547b85e2ab9d..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Contafiscal 4.0 2010.zip.md +++ /dev/null @@ -1,36 +0,0 @@ -
    -

    Contafiscal 4.0 2010.zip: A Guide to Download and Install the Accounting Software

    -

    Contafiscal 4.0 2010.zip is a file that contains the installation package for Contafiscal, a software system that helps you manage your accounting and tax obligations in Mexico. Contafiscal is designed for small and medium-sized businesses, as well as professionals and freelancers who need to comply with the fiscal regulations of the country.

    -

    Contafiscal 4.0 2010.zip


    Download Zip: https://urlcod.com/2uIcqr



    -

    In this article, we will show you how to download and install Contafiscal 4.0 2010.zip on your computer, as well as some of the features and benefits of using this software.

    -

    How to Download Contafiscal 4.0 2010.zip

    -

    There are several sources where you can download Contafiscal 4.0 2010.zip, but we recommend that you use the developer's official website, which is https://sway.office.com/dhiYX5D90JS8baEx [^1^]. This way, you can ensure that you are getting the latest and most secure version of the software.

    -

    To download Contafiscal 4.0 2010.zip from the official website, follow these steps:

    -

    -
      -
    1. Go to https://sway.office.com/dhiYX5D90JS8baEx [^1^] in your web browser.
    2. Click on the "Download" button at the top right corner of the page.
    3. Save the file Contafiscal 4.0 2010.zip on your computer, preferably on your desktop or in a folder where you can easily find it.
    -

    How to Install Contafiscal 4.0 2010.zip

    -

    Once you have downloaded Contafiscal 4.0 2010.zip, you need to extract it and run the setup file to install the software on your computer. To do this, follow these steps:

    -
      -
    1. Locate the file Contafiscal 4.0 2010.zip on your computer and right-click on it.
    2. Select "Extract All" from the menu that appears.
    3. Choose a destination folder where you want to extract the files and click on "Extract".
    4. Open the folder where you extracted the files and double-click on the file "Setup.exe".
    5. Follow the instructions on the screen to complete the installation process.
    -

    Features and Benefits of Contafiscal

    -

    Contafiscal is a software system that helps you manage your accounting and tax obligations in Mexico. Some of the features and benefits of using this software are:

    -
      -
    • It allows you to generate and print invoices, receipts, vouchers, payroll slips, and other fiscal documents in compliance with Mexican regulations.
    • It integrates with other software systems such as Excel, Word, Outlook, and QuickBooks.
    • It automates the calculation of taxes, deductions, withholdings, and other fiscal concepts.
    • It generates reports and statements that help you monitor your income, expenses, cash flow, inventory, assets, liabilities, and equity.
    • It updates automatically with the latest changes in the fiscal laws and regulations of Mexico.
    • It offers technical support and customer service via phone, email, chat, or remote assistance.
    -

    If you are looking for reliable, easy-to-use accounting software for your business or professional activity in Mexico, Contafiscal 4.0 2010.zip is a great option to consider. Download and install it today and enjoy its features and benefits.

    e93f5a0c3f
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/HauntedHouseCrypticGravespatch8((INSTALL)) Download.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/HauntedHouseCrypticGravespatch8((INSTALL)) Download.md deleted file mode 100644 index dc2a52e92f9da4ba18e5d53255e5196b2ffcdd84..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/HauntedHouseCrypticGravespatch8((INSTALL)) Download.md +++ /dev/null @@ -1,23 +0,0 @@ - -

    How to Download Patch 8 for Haunted House: Cryptic Graves

    -

    Haunted House: Cryptic Graves is a terrifying, first-person, adventure-horror game inspired by the classic Atari hit of the same name. The game was released in 2014 by Dreampainters Software and Atari Inc. and received several updates to improve its gameplay, performance and stability[^1^].

    -

    One of the latest updates is Patch 8, which was released on February 5, 2015. This patch is also known as The Adventure Update, as it brings Haunted House: Cryptic Graves back to its adventure-game roots. It includes refinements to some of the major puzzles and boss encounters, re-balanced Stalker AI behavior, more visual details and lore notes, full language support for Italian and German, and removal of voiceovers[^2^].

    -

    HauntedHouseCrypticGravespatch8download


    Download Zip: https://urlcod.com/2uIbnQ



    -

    If you want to download Patch 8 for Haunted House: Cryptic Graves, you need to follow these steps:

    -
      -
    1. Make sure you have the Steam client installed on your computer and that you have purchased Haunted House: Cryptic Graves from the Steam store.
    2. Launch Steam and log in to your account.
    3. Go to your Library and find Haunted House: Cryptic Graves in your list of games.
    4. Right-click on the game and select Properties.
    5. Go to the Updates tab and make sure that Automatic updates are enabled.
    6. If there is a pending update for the game, Steam will download it automatically. You can check the progress of the download in the Downloads section of Steam.
    7. Once the update is downloaded, you can launch the game and enjoy the new features and improvements.
    -

    If you have any issues with downloading or installing Patch 8 for Haunted House: Cryptic Graves, you can contact the developers through their Steam Community page[^2^] or their Facebook page[^3^]. They are always listening to feedback and working hard on further updates.

    -

    We hope this article was helpful and that you have a spooky time exploring Abaddon Grange!

    - -

    Haunted House: Cryptic Graves is a game that will challenge your wits and nerves as you explore the mysterious mansion of Zachary Graves, a notorious occultist and collector of all things supernatural. You play as Anya Graves, his heir, who inherits his estate and his powers to communicate with the dead. As you uncover the secrets and horrors of Abaddon Grange, you will also awaken a long-dormant evil that lurks in the shadows...

    -

    The game features a rich and immersive story written by award-winning comics and videogame writer Gordon Rennie, scriptwriter for Killzone. You will encounter various supernatural entities and phenomena, such as ghosts, poltergeists, witches, Jack the Ripper and more. You will also have to use your special powers and craft potions using the four elements of alchemy: fire, air, water and earth. The game has 18 Steam achievements to unlock and boasts stunning graphics and sound effects that will make you feel like you are really inside a haunted house.

    -

    If you are a fan of adventure-horror games and want to experience a thrilling and terrifying journey into the unknown, Haunted House: Cryptic Graves is the game for you. Download Patch 8 today and get ready to face your fears!

    cec2833e83
    -
    -
    \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Magix Digital Dj 2 Mac Crack.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Magix Digital Dj 2 Mac Crack.md deleted file mode 100644 index ae01e108c69583e41c42ce187f2bac04839fe8a3..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Magix Digital Dj 2 Mac Crack.md +++ /dev/null @@ -1,29 +0,0 @@ -
    -

    How to Crack Magix Digital DJ 2 for Mac

    -

    If you are looking for a way to crack Magix Digital DJ 2 for Mac, you might be disappointed. Magix Digital DJ 2 is professional DJ software that offers a wide range of features and functions for mixing and creating music. It is not free software, and cracking it is illegal and unethical. However, you can still try it out for free for 30 days by downloading it from the official website[^1^]. In this article, we review some of the features and benefits of Magix Digital DJ 2 for Mac and explain why you should consider buying it instead of cracking it.

    -

    Features and Benefits of Magix Digital DJ 2 for Mac

    -

    Magix Digital DJ 2 for Mac is powerful, easy-to-use DJ software that lets you mix your music library with your iTunes collection. You can also use it with any MIDI controller or turntable, or even with just your mouse and keyboard. Here are some of the features and benefits of Magix Digital DJ 2 for Mac:

    -

    magix digital dj 2 mac crack


    DOWNLOAD ===> https://urlcod.com/2uI9Ru



    -
      -
    • It has a user-friendly interface that shows you all the essential information and controls on one screen.
    • It has an intelligent music manager that automatically analyzes your tracks and suggests suitable songs based on tempo, key, genre, and mood.
    • It has mufin technology that allows you to discover new music based on your preferences and musical taste.
    • It has a loop function that lets you repeat certain effects or parts of a song as much as you want.
    • It has a scratch function that gives you the ability to create the classic DJ effect of scratching a vinyl record while playing.
    • It has a flanger effect that creates a phasing sound by mixing two signals milliseconds apart.
    • It has a master tempo function that lets you change the tempo of a song without affecting its pitch.
    • It has a cue function that lets you set markers on your tracks and jump to them instantly.
    • It has a record function that lets you record your mixes and save them as MP3 files.
    • It has a share function that lets you upload your mixes to SoundCloud or Facebook directly from the software.
    -

    Why You Should Buy Magix Digital DJ 2 for Mac Instead of Cracking It

    -

    Cracking Magix Digital DJ 2 for Mac is not only illegal and unethical, but also risky and unreliable. You might end up downloading malware or viruses that can harm your computer or compromise your personal data. You might also face legal consequences if you are caught using pirated software. Moreover, you will miss out on the benefits of buying Magix Digital DJ 2 for Mac, such as:

    -
      -
    • You will get regular updates and bug fixes that will improve the performance and stability of the software.
    • You will get access to customer support and technical assistance in case you encounter any problems or issues with the software.
    • You will get a lifetime license that will allow you to use the software on any compatible device without any limitations or restrictions.
    • You will support the developers and creators of the software who have invested their time, money, and effort to produce a quality product.
    -

    Magix Digital DJ 2 for Mac is great software for anyone who wants to mix and create music like a pro. It is worth buying instead of cracking, as you will get more value, security, and satisfaction from using it. You can download it for free for 30 days from the official website[^1^] and see for yourself how amazing it is. If you like it, you can buy it for only $39.99 from the same website[^1^]. Don't miss this opportunity to unleash your creativity and have fun with Magix Digital DJ 2 for Mac!

    -
    -
    \ No newline at end of file diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/meshes/builtin.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/meshes/builtin.py deleted file mode 100644 index c0b23760e8268b068149931b173a4285ba451993..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/DensePose/densepose/data/meshes/builtin.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -from .catalog import MeshInfo, register_meshes - -DENSEPOSE_MESHES_DIR = "https://dl.fbaipublicfiles.com/densepose/meshes/" - -MESHES = [ - MeshInfo( - name="smpl_27554", - data="smpl_27554.pkl", - geodists="geodists/geodists_smpl_27554.pkl", - symmetry="symmetry/symmetry_smpl_27554.pkl", - texcoords="texcoords/texcoords_smpl_27554.pkl", - ), - MeshInfo( - name="chimp_5029", - data="chimp_5029.pkl", - geodists="geodists/geodists_chimp_5029.pkl", - symmetry="symmetry/symmetry_chimp_5029.pkl", - texcoords="texcoords/texcoords_chimp_5029.pkl", - ), - MeshInfo( - name="cat_5001", - data="cat_5001.pkl", - geodists="geodists/geodists_cat_5001.pkl", - symmetry="symmetry/symmetry_cat_5001.pkl", - texcoords="texcoords/texcoords_cat_5001.pkl", - ), - MeshInfo( - name="cat_7466", - data="cat_7466.pkl", - geodists="geodists/geodists_cat_7466.pkl", - symmetry="symmetry/symmetry_cat_7466.pkl", - texcoords="texcoords/texcoords_cat_7466.pkl", - ), - MeshInfo( - name="sheep_5004", - data="sheep_5004.pkl", - geodists="geodists/geodists_sheep_5004.pkl", - symmetry="symmetry/symmetry_sheep_5004.pkl", - texcoords="texcoords/texcoords_sheep_5004.pkl", - ), - MeshInfo( - name="zebra_5002", - data="zebra_5002.pkl", - geodists="geodists/geodists_zebra_5002.pkl", - symmetry="symmetry/symmetry_zebra_5002.pkl", - texcoords="texcoords/texcoords_zebra_5002.pkl", - ), - MeshInfo( - name="horse_5004", - data="horse_5004.pkl", - geodists="geodists/geodists_horse_5004.pkl", - symmetry="symmetry/symmetry_horse_5004.pkl", - texcoords="texcoords/texcoords_zebra_5002.pkl", - ), - MeshInfo( - name="giraffe_5002", - data="giraffe_5002.pkl", - geodists="geodists/geodists_giraffe_5002.pkl", - symmetry="symmetry/symmetry_giraffe_5002.pkl", - texcoords="texcoords/texcoords_giraffe_5002.pkl", - ), - MeshInfo( - name="elephant_5002", - data="elephant_5002.pkl", - geodists="geodists/geodists_elephant_5002.pkl", - symmetry="symmetry/symmetry_elephant_5002.pkl", - texcoords="texcoords/texcoords_elephant_5002.pkl", - ), - MeshInfo( - name="dog_5002", - data="dog_5002.pkl", - geodists="geodists/geodists_dog_5002.pkl", - symmetry="symmetry/symmetry_dog_5002.pkl", - texcoords="texcoords/texcoords_dog_5002.pkl", - ), - MeshInfo( - name="dog_7466", - data="dog_7466.pkl", - geodists="geodists/geodists_dog_7466.pkl", - symmetry="symmetry/symmetry_dog_7466.pkl", - texcoords="texcoords/texcoords_dog_7466.pkl", - ), - MeshInfo( - name="cow_5002", - data="cow_5002.pkl", - geodists="geodists/geodists_cow_5002.pkl", - symmetry="symmetry/symmetry_cow_5002.pkl", - texcoords="texcoords/texcoords_cow_5002.pkl", - ), - MeshInfo( - name="bear_4936", - data="bear_4936.pkl", - geodists="geodists/geodists_bear_4936.pkl", - symmetry="symmetry/symmetry_bear_4936.pkl", - texcoords="texcoords/texcoords_bear_4936.pkl", - ), -] - -register_meshes(MESHES, DENSEPOSE_MESHES_DIR) diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tools/visualize_json_results.py 
b/spaces/nikitaPDL2023/assignment4/detectron2/tools/visualize_json_results.py deleted file mode 100644 index 472190e0b3b38b55773795915badbb5bc4599d42..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/tools/visualize_json_results.py +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. - -import argparse -import json -import numpy as np -import os -from collections import defaultdict -import cv2 -import tqdm - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.structures import Boxes, BoxMode, Instances -from detectron2.utils.file_io import PathManager -from detectron2.utils.logger import setup_logger -from detectron2.utils.visualizer import Visualizer - - -def create_instances(predictions, image_size): - ret = Instances(image_size) - - score = np.asarray([x["score"] for x in predictions]) - chosen = (score > args.conf_threshold).nonzero()[0] - score = score[chosen] - bbox = np.asarray([predictions[i]["bbox"] for i in chosen]).reshape(-1, 4) - bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) - - labels = np.asarray([dataset_id_map(predictions[i]["category_id"]) for i in chosen]) - - ret.scores = score - ret.pred_boxes = Boxes(bbox) - ret.pred_classes = labels - - try: - ret.pred_masks = [predictions[i]["segmentation"] for i in chosen] - except KeyError: - pass - return ret - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="A script that visualizes the json predictions from COCO or LVIS dataset." - ) - parser.add_argument("--input", required=True, help="JSON file produced by the model") - parser.add_argument("--output", required=True, help="output directory") - parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val") - parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold") - args = parser.parse_args() - - logger = setup_logger() - - with PathManager.open(args.input, "r") as f: - predictions = json.load(f) - - pred_by_image = defaultdict(list) - for p in predictions: - pred_by_image[p["image_id"]].append(p) - - dicts = list(DatasetCatalog.get(args.dataset)) - metadata = MetadataCatalog.get(args.dataset) - if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): - - def dataset_id_map(ds_id): - return metadata.thing_dataset_id_to_contiguous_id[ds_id] - - elif "lvis" in args.dataset: - # LVIS results are in the same format as COCO results, but have a different - # mapping from dataset category id to contiguous category id in [0, #categories - 1] - def dataset_id_map(ds_id): - return ds_id - 1 - - else: - raise ValueError("Unsupported dataset: {}".format(args.dataset)) - - os.makedirs(args.output, exist_ok=True) - - for dic in tqdm.tqdm(dicts): - img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1] - basename = os.path.basename(dic["file_name"]) - - predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2]) - vis = Visualizer(img, metadata) - vis_pred = vis.draw_instance_predictions(predictions).get_image() - - vis = Visualizer(img, metadata) - vis_gt = vis.draw_dataset_dict(dic).get_image() - - concat = np.concatenate((vis_pred, vis_gt), axis=1) - cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1]) diff --git a/spaces/nomic-ai/ag_news/index.html b/spaces/nomic-ai/ag_news/index.html deleted file mode 100644 index ea45f179a08ac2f94e5b09ef73b325a6e70d2ec3..0000000000000000000000000000000000000000 --- 
a/spaces/nomic-ai/ag_news/index.html +++ /dev/null @@ -1,42 +0,0 @@ - - - - ag_news - - - - -
    - -
    - - - \ No newline at end of file diff --git a/spaces/oliver2023/chatgpt-on-wechat/bot/openai/open_ai_session.py b/spaces/oliver2023/chatgpt-on-wechat/bot/openai/open_ai_session.py deleted file mode 100644 index 597611cd1f7b0245f92f569db63ea6f2af2171c0..0000000000000000000000000000000000000000 --- a/spaces/oliver2023/chatgpt-on-wechat/bot/openai/open_ai_session.py +++ /dev/null @@ -1,67 +0,0 @@ -from bot.session_manager import Session -from common.log import logger -class OpenAISession(Session): - def __init__(self, session_id, system_prompt=None, model= "text-davinci-003"): - super().__init__(session_id, system_prompt) - self.model = model - self.reset() - - def __str__(self): - # 构造对话模型的输入 - ''' - e.g. Q: xxx - A: xxx - Q: xxx - ''' - prompt = "" - for item in self.messages: - if item['role'] == 'system': - prompt += item['content'] + "<|endoftext|>\n\n\n" - elif item['role'] == 'user': - prompt += "Q: " + item['content'] + "\n" - elif item['role'] == 'assistant': - prompt += "\n\nA: " + item['content'] + "<|endoftext|>\n" - - if len(self.messages) > 0 and self.messages[-1]['role'] == 'user': - prompt += "A: " - return prompt - - def discard_exceeding(self, max_tokens, cur_tokens= None): - precise = True - try: - cur_tokens = num_tokens_from_string(str(self), self.model) - except Exception as e: - precise = False - if cur_tokens is None: - raise e - logger.debug("Exception when counting tokens precisely for query: {}".format(e)) - while cur_tokens > max_tokens: - if len(self.messages) > 1: - self.messages.pop(0) - elif len(self.messages) == 1 and self.messages[0]["role"] == "assistant": - self.messages.pop(0) - if precise: - cur_tokens = num_tokens_from_string(str(self), self.model) - else: - cur_tokens = len(str(self)) - break - elif len(self.messages) == 1 and self.messages[0]["role"] == "user": - logger.warn("user question exceed max_tokens. total_tokens={}".format(cur_tokens)) - break - else: - logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.messages))) - break - if precise: - cur_tokens = num_tokens_from_string(str(self), self.model) - else: - cur_tokens = len(str(self)) - return cur_tokens - - -# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb -def num_tokens_from_string(string: str, model: str) -> int: - """Returns the number of tokens in a text string.""" - import tiktoken - encoding = tiktoken.encoding_for_model(model) - num_tokens = len(encoding.encode(string,disallowed_special=())) - return num_tokens \ No newline at end of file diff --git a/spaces/oskarvanderwal/MT-bias-demo/description.md b/spaces/oskarvanderwal/MT-bias-demo/description.md deleted file mode 100644 index ff7c06d08d526bf311ae036fb2003892b314b310..0000000000000000000000000000000000000000 --- a/spaces/oskarvanderwal/MT-bias-demo/description.md +++ /dev/null @@ -1,9 +0,0 @@ -# Gender Bias in MT: Hungarian to English - -The Hungarian language has no grammatical gender and words like “he” and “she” are both translated as “ő”. -This makes it an interesting language to study gender bias in machine translation (MT) models, when translating to another language that does distinguish between “he” and “she”. -In this demo, we will test the OPUS-MT models (Tiedemann & Thottingal, 2020) from the *Language Technology Research Group at the University of Helsinki* ([Helsinki-NLP](https://github.com/Helsinki-NLP)). 
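To make the setup described above concrete, here is a minimal sketch (not part of the original Space) of how a gender-neutral Hungarian sentence could be translated with an OPUS-MT checkpoint through the Hugging Face `transformers` pipeline. The model name `Helsinki-NLP/opus-mt-hu-en` and the example sentences are assumptions chosen for illustration; the point is only to show where the English output is forced to pick "he" or "she".

```python
# Hypothetical illustration: translate gender-neutral Hungarian sentences and
# inspect which English pronoun the model picks. The checkpoint name and the
# example sentences are assumptions, not taken from the original demo code.
from transformers import pipeline

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-hu-en")

# "Ő" is a gender-neutral pronoun in Hungarian; English forces a gendered choice.
for sentence in ["Ő egy orvos.", "Ő egy ápoló."]:
    translation = translator(sentence)[0]["translation_text"]
    print(f"{sentence} -> {translation}")
```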
- -For each translation, we also use the [Inseq library](https://github.com/inseq-team/inseq) to compute the feature attributions with integrated gradients: How important is each token in the source (Hungarian) for the translation of the target tokens (English)? - -⚠️ Please note that this demo is just an illustration of how gender bias could manifest in MT models, but an actual assessment of its bias requires a more rigourous experiment. \ No newline at end of file diff --git a/spaces/overlordx/starlight/app.py b/spaces/overlordx/starlight/app.py deleted file mode 100644 index 6b158b31e6ae0b9903a7d4a663b7b9d854ba6ccd..0000000000000000000000000000000000000000 --- a/spaces/overlordx/starlight/app.py +++ /dev/null @@ -1,9 +0,0 @@ -import streamlit as st - -x = st.slider('Select a value') -st.write(x, 'squared is', x * x) -microphone = st.audio -picture = st.camera_input("Take a picture") -if picture: - this_image = st.image(picture) -print("Done!") diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/README.md b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/README.md deleted file mode 100644 index fb91f59411265660e01d8b4bcc0b99e8b8fe9d55..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Models - -For more detail on the models, please refer to the [docs](https://huggingface.co/docs/diffusers/api/models/overview). \ No newline at end of file diff --git a/spaces/pinkq/Newbing/src/components/user-menu.tsx b/spaces/pinkq/Newbing/src/components/user-menu.tsx deleted file mode 100644 index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000 --- a/spaces/pinkq/Newbing/src/components/user-menu.tsx +++ /dev/null @@ -1,113 +0,0 @@ -'use client' - -import { useEffect, useState } from 'react' -import Image from 'next/image' -import { toast } from 'react-hot-toast' -import { Button } from '@/components/ui/button' -import pkg from '../../package.json' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger -} from '@/components/ui/dropdown-menu' -import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons' -import SettingIcon from '@/assets/images/settings.svg' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function UserMenu() { - const [host, setHost] = useState('') - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - useEffect(() => { - setHost(location.host) - }, []) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - return ( -
    - - - - - - - location.href='#dialog="settings"' - } - className="cursor-pointer" - > - 设置用户 - - - - location.href='#dialog="voice"' - } - className="cursor-pointer" - > - 语音设置 - - - - - 开源地址 - - - - - - - - 托管地址 - 🤗 - - - - - - - 复制站点 - - - - - -
    版本信息 {pkg.version}
    -
    - - -
    站点域名
    -
    copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer"> - {host} -
    -
    -
    -
    -
    - ) -} diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/models/__init__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/models/__init__.py deleted file mode 100644 index 7855226e4b500142deef8fb247cd33a9a991d122..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/models/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""A package that contains models that represent entities. -""" diff --git a/spaces/podsni/twitter_sentiment_id/README.md b/spaces/podsni/twitter_sentiment_id/README.md deleted file mode 100644 index 4c092451e00cb3a526846866d4e90c29da8cb3b8..0000000000000000000000000000000000000000 --- a/spaces/podsni/twitter_sentiment_id/README.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Indo Twitter Sentiment App -emoji: 👀 -colorFrom: green -colorTo: yellow -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false -duplicated_from: dafqi/indo_twitter_sentiment_app ---- - -# twitter sentiment app - -Aplikasi sederhana untuk melakukan analisis sentimen terhadap tweet yang diinputkan dan mengekstrak topik dari setiap sentimen - -link website : https://dafiqrahman-twitter-sentiment-app-app-shcgk3.streamlit.app/ - diff --git a/spaces/portal/Top-20/masto.html b/spaces/portal/Top-20/masto.html deleted file mode 100644 index 2c7116aa8c0cc0a93984636a62a1e300ae00c5cb..0000000000000000000000000000000000000000 --- a/spaces/portal/Top-20/masto.html +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/portal/guanaco-playground/back.html b/spaces/portal/guanaco-playground/back.html deleted file mode 100644 index 52ffd62718c6c110a2ffe3ab96b9264e1e8dfcc8..0000000000000000000000000000000000000000 --- a/spaces/portal/guanaco-playground/back.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - -
    - -
    - - - \ No newline at end of file diff --git a/spaces/pragnakalp/BERT_based_QnA/save_data.py b/spaces/pragnakalp/BERT_based_QnA/save_data.py deleted file mode 100644 index 7b69e7420a9291b75e2c297e0371307bffefaed2..0000000000000000000000000000000000000000 --- a/spaces/pragnakalp/BERT_based_QnA/save_data.py +++ /dev/null @@ -1,82 +0,0 @@ -import os -import csv -import json -import requests -import re as r -from urllib.request import urlopen -from huggingface_hub import Repository - -HF_TOKEN = os.environ.get("HF_TOKEN") -DATASET_NAME = "bert_based_qna_dataset" -DATASET_REPO_URL = f"https://huggingface.co/datasets/pragnakalp/{DATASET_NAME}" -DATA_FILENAME = "qna_logs.csv" -DATA_FILE = os.path.join("qna_logs", DATA_FILENAME) -DATASET_REPO_ID = "pragnakalp/bert_based_qna_dataset" -print("is none?", HF_TOKEN is None) - -try: - hf_hub_download( - repo_id=DATASET_REPO_ID, - filename=DATA_FILENAME, - cache_dir=DATA_DIRNAME, - force_filename=DATA_FILENAME - ) - -except: - print("file not found") - -repo = Repository( - local_dir="qna_logs", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -) - -def getIP(): - d = str(urlopen('http://checkip.dyndns.com/') - .read()) - - return r.compile(r'Address: (\d+\.\d+\.\d+\.\d+)').search(d).group(1) - -def get_location(ip_addr): - ip=ip_addr - - req_data={ - "ip":ip, - "token":"pkml123" - } - url = "https://demos.pragnakalp.com/get-ip-location" - - # req_data=json.dumps(req_data) - # print("req_data",req_data) - headers = {'Content-Type': 'application/json'} - - response = requests.request("POST", url, headers=headers, data=json.dumps(req_data)) - response = response.json() - print("response======>>",response) - return response - -def save_data_and_sendmail(language, paragraph, result_dic): - try: - print("welcome") - ip_address = '' - - ip_address= getIP() - print(ip_address) - location = get_location(ip_address) - print(location) - add_csv = [language, paragraph, result_dic, ip_address, location] - with open(DATA_FILE, "a") as f: - writer = csv.writer(f) - # write the data - writer.writerow(add_csv) - commit_url = repo.push_to_hub() - print("commit data :",commit_url) - - url = 'https://pragnakalpdev33.pythonanywhere.com/HF_space_qna_gen' - myobj = {'lan':language,'para': paragraph,'result_dic':result_dic,'ip_addr':ip_address,"location":location} - x = requests.post(url, json = myobj) - print("Email status: ",x.status_code) - - return "Successfully save data" - - except Exception as e: - print("error") - return "Error while sending mail" + str(e) \ No newline at end of file diff --git a/spaces/pritamdeka/health-article-keyphrase-generator/nltkmodule.py b/spaces/pritamdeka/health-article-keyphrase-generator/nltkmodule.py deleted file mode 100644 index 1f82427a33bd2fb77aa43b36c00ab7643af9a6c5..0000000000000000000000000000000000000000 --- a/spaces/pritamdeka/health-article-keyphrase-generator/nltkmodule.py +++ /dev/null @@ -1,10 +0,0 @@ -import nltk - - -nltk.download('wordnet') -nltk.download('punkt') -nltk.download('stopwords') -nltk.download('averaged_perceptron_tagger') -nltk.download('maxent_ne_chunker') -nltk.download('words') -nltk.download('brown') \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/altair/utils/_importers.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/altair/utils/_importers.py deleted file mode 100644 index 6ee41351dfe43a8df54045bd72020bc7f3fda42a..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/altair/utils/_importers.py +++ /dev/null @@ -1,85 +0,0 @@ -from types import ModuleType -from packaging.version import Version -from importlib.metadata import version as importlib_version - - -def import_vegafusion() -> ModuleType: - min_version = "1.4.0" - try: - version = importlib_version("vegafusion") - if Version(version) < Version(min_version): - raise ImportError( - f"The vegafusion package must be version {min_version} or greater. " - f"Found version {version}" - ) - import vegafusion as vf # type: ignore - - return vf - except ImportError as err: - raise ImportError( - 'The "vegafusion" data transformer and chart.transformed_data feature requires\n' - f"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\n" - "These can be installed with pip using:\n" - f' pip install "vegafusion[embed]>={min_version}"\n' - "Or with conda using:\n" - f' conda install -c conda-forge "vegafusion-python-embed>={min_version}" ' - f'"vegafusion>={min_version}"\n\n' - f"ImportError: {err.args[0]}" - ) from err - - -def import_vl_convert() -> ModuleType: - min_version = "0.14.0" - try: - version = importlib_version("vl-convert-python") - if Version(version) < Version(min_version): - raise ImportError( - f"The vl-convert-python package must be version {min_version} or greater. " - f"Found version {version}" - ) - import vl_convert as vlc - - return vlc - except ImportError as err: - raise ImportError( - f"The vl-convert Vega-Lite compiler and image export feature requires\n" - f"version {min_version} or greater of the 'vl-convert-python' package. \n" - f"This can be installed with pip using:\n" - f' pip install "vl-convert-python>={min_version}"\n' - "or conda:\n" - f' conda install -c conda-forge "vl-convert-python>={min_version}"\n\n' - f"ImportError: {err.args[0]}" - ) from err - - -def import_pyarrow_interchange() -> ModuleType: - min_version = "11.0.0" - try: - version = importlib_version("pyarrow") - - if Version(version) < Version(min_version): - raise ImportError( - f"The pyarrow package must be version {min_version} or greater. " - f"Found version {version}" - ) - import pyarrow.interchange as pi - - return pi - except ImportError as err: - raise ImportError( - f"Usage of the DataFrame Interchange Protocol requires\n" - f"version {min_version} or greater of the pyarrow package. \n" - f"This can be installed with pip using:\n" - f' pip install "pyarrow>={min_version}"\n' - "or conda:\n" - f' conda install -c conda-forge "pyarrow>={min_version}"\n\n' - f"ImportError: {err.args[0]}" - ) from err - - -def pyarrow_available() -> bool: - try: - import_pyarrow_interchange() - return True - except ImportError: - return False diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/encodingTools.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/encodingTools.py deleted file mode 100644 index 3b2651d3b1ce222060fa67abaeac4da8030618fa..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/encodingTools.py +++ /dev/null @@ -1,72 +0,0 @@ -"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings. 
-""" - -import fontTools.encodings.codecs - -# Map keyed by platformID, then platEncID, then possibly langID -_encodingMap = { - 0: { # Unicode - 0: "utf_16_be", - 1: "utf_16_be", - 2: "utf_16_be", - 3: "utf_16_be", - 4: "utf_16_be", - 5: "utf_16_be", - 6: "utf_16_be", - }, - 1: { # Macintosh - # See - # https://github.com/fonttools/fonttools/issues/236 - 0: { # Macintosh, platEncID==0, keyed by langID - 15: "mac_iceland", - 17: "mac_turkish", - 18: "mac_croatian", - 24: "mac_latin2", - 25: "mac_latin2", - 26: "mac_latin2", - 27: "mac_latin2", - 28: "mac_latin2", - 36: "mac_latin2", - 37: "mac_romanian", - 38: "mac_latin2", - 39: "mac_latin2", - 40: "mac_latin2", - Ellipsis: "mac_roman", # Other - }, - 1: "x_mac_japanese_ttx", - 2: "x_mac_trad_chinese_ttx", - 3: "x_mac_korean_ttx", - 6: "mac_greek", - 7: "mac_cyrillic", - 25: "x_mac_simp_chinese_ttx", - 29: "mac_latin2", - 35: "mac_turkish", - 37: "mac_iceland", - }, - 2: { # ISO - 0: "ascii", - 1: "utf_16_be", - 2: "latin1", - }, - 3: { # Microsoft - 0: "utf_16_be", - 1: "utf_16_be", - 2: "shift_jis", - 3: "gb2312", - 4: "big5", - 5: "euc_kr", - 6: "johab", - 10: "utf_16_be", - }, -} - - -def getEncoding(platformID, platEncID, langID, default=None): - """Returns the Python encoding name for OpenType platformID/encodingID/langID - triplet. If encoding for these values is not known, by default None is - returned. That can be overriden by passing a value to the default argument. - """ - encoding = _encodingMap.get(platformID, {}).get(platEncID, default) - if isinstance(encoding, dict): - encoding = encoding.get(langID, encoding[Ellipsis]) - return encoding diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/wasm/src/webworker/index.ts b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/wasm/src/webworker/index.ts deleted file mode 100644 index 6959c961db1f47930d339f096bc12a806f1d4544..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/wasm/src/webworker/index.ts +++ /dev/null @@ -1,379 +0,0 @@ -/// -/* eslint-env worker */ - -import type { PyodideInterface } from "pyodide"; -import type { - InMessage, - InMessageInit, - OutMessage, - ReplyMessageError, - ReplyMessageSuccess -} from "../message-types"; -import { writeFileWithParents, renameWithParents } from "./file"; -import { verifyRequirements } from "./requirements"; -import { makeHttpRequest } from "./http"; -import { initWebSocket } from "./websocket"; -import scriptRunnerPySource from "./py/script_runner.py?raw"; -import unloadModulesPySource from "./py/unload_modules.py?raw"; - -importScripts("https://cdn.jsdelivr.net/pyodide/v0.24.0/full/pyodide.js"); - -let pyodide: PyodideInterface; - -let pyodideReadyPromise: undefined | Promise = undefined; - -let call_asgi_app_from_js: ( - scope: unknown, - receive: () => Promise, - send: (event: any) => Promise -) => Promise; -let run_script: (path: string) => Promise; -let unload_local_modules: (target_dir_path?: string) => void; - -function updateProgress(log: string): void { - const message: OutMessage = { - type: "progress-update", - data: { - log - } - }; - self.postMessage(message); -} - -async function loadPyodideAndPackages( - options: InMessageInit["data"] -): Promise { - console.debug("Loading Pyodide."); - updateProgress("Loading Pyodide"); - pyodide = await loadPyodide({ - stdout: console.debug, - stderr: console.error - }); - console.debug("Pyodide is loaded."); - - 
console.debug("Mounting files.", options.files); - updateProgress("Mounting files"); - await Promise.all( - Object.keys(options.files).map(async (path) => { - const file = options.files[path]; - - let data: string | ArrayBufferView; - if ("url" in file) { - console.debug(`Fetch a file from ${file.url}`); - data = await fetch(file.url) - .then((res) => res.arrayBuffer()) - .then((buffer) => new Uint8Array(buffer)); - } else { - data = file.data; - } - const { opts } = options.files[path]; - - console.debug(`Write a file "${path}"`); - writeFileWithParents(pyodide, path, data, opts); - }) - ); - console.debug("Files are mounted."); - - console.debug("Loading micropip"); - updateProgress("Loading micropip"); - await pyodide.loadPackage("micropip"); - const micropip = pyodide.pyimport("micropip"); - console.debug("micropip is loaded."); - - const gradioWheelUrls = [ - options.gradioWheelUrl, - options.gradioClientWheelUrl - ]; - console.debug("Loading Gradio wheels.", gradioWheelUrls); - updateProgress("Loading Gradio wheels"); - await micropip.add_mock_package("ffmpy", "0.3.0"); - await micropip.add_mock_package("aiohttp", "3.8.4"); - await pyodide.loadPackage(["ssl", "distutils", "setuptools"]); - await micropip.install(["typing-extensions>=4.8.0"]); // Typing extensions needs to be installed first otherwise the versions from the pyodide lockfile is used which is incompatible with the latest fastapi. - await micropip.install(["markdown-it-py[linkify]~=2.2.0"]); // On 3rd June 2023, markdown-it-py 3.0.0 has been released. The `gradio` package depends on its `>=2.0.0` version so its 3.x will be resolved. However, it conflicts with `mdit-py-plugins`'s dependency `markdown-it-py >=1.0.0,<3.0.0` and micropip currently can't resolve it. So we explicitly install the compatible version of the library here. - await micropip.install(["anyio==3.*"]); // `fastapi` depends on `anyio>=3.4.0,<5` so its 4.* can be installed, but it conflicts with the anyio version `httpx` depends on, `==3.*`. Seems like micropip can't resolve it for now, so we explicitly install the compatible version of the library here. - await micropip.install.callKwargs(gradioWheelUrls, { - keep_going: true - }); - console.debug("Gradio wheels are loaded."); - - console.debug("Installing packages.", options.requirements); - updateProgress("Installing packages"); - await micropip.install.callKwargs(options.requirements, { keep_going: true }); - console.debug("Packages are installed."); - - console.debug("Mocking os module methods."); - updateProgress("Mock os module methods"); - // `os.link` is used in `aiofiles` (https://github.com/Tinche/aiofiles/blob/v23.1.0/src/aiofiles/os.py#L31), - // which is imported from `gradio.ranged_response` (https://github.com/gradio-app/gradio/blob/v3.32.0/gradio/ranged_response.py#L12). - // However, it's not available on Wasm. - await pyodide.runPythonAsync(` -import os - -os.link = lambda src, dst: None -`); - console.debug("os module methods are mocked."); - - console.debug("Importing gradio package."); - updateProgress("Importing gradio package"); - // Importing the gradio package takes a long time, so we do it separately. - // This is necessary for accurate performance profiling. 
- await pyodide.runPythonAsync(`import gradio`); - console.debug("gradio package is imported."); - - console.debug("Defining a ASGI wrapper function."); - updateProgress("Defining a ASGI wrapper function"); - // TODO: Unlike Streamlit, user's code is executed in the global scope, - // so we should not define this function in the global scope. - await pyodide.runPythonAsync(` -# Based on Shiny's App.call_pyodide(). -# https://github.com/rstudio/py-shiny/blob/v0.3.3/shiny/_app.py#L224-L258 -async def _call_asgi_app_from_js(scope, receive, send): - # TODO: Pretty sure there are objects that need to be destroy()'d here? - scope = scope.to_py() - - # ASGI requires some values to be byte strings, not character strings. Those are - # not that easy to create in JavaScript, so we let the JS side pass us strings - # and we convert them to bytes here. - if "headers" in scope: - # JS doesn't have \`bytes\` so we pass as strings and convert here - scope["headers"] = [ - [value.encode("latin-1") for value in header] - for header in scope["headers"] - ] - if "query_string" in scope and scope["query_string"]: - scope["query_string"] = scope["query_string"].encode("latin-1") - if "raw_path" in scope and scope["raw_path"]: - scope["raw_path"] = scope["raw_path"].encode("latin-1") - - async def rcv(): - event = await receive() - return event.to_py() - - async def snd(event): - await send(event) - - app = gradio.wasm_utils.get_registered_app() - if app is None: - raise RuntimeError("Gradio app has not been launched.") - - await app(scope, rcv, snd) -`); - call_asgi_app_from_js = pyodide.globals.get("_call_asgi_app_from_js"); - console.debug("The ASGI wrapper function is defined."); - - console.debug("Mocking async libraries."); - updateProgress("Mocking async libraries"); - // FastAPI uses `anyio.to_thread.run_sync` internally which, however, doesn't work in Wasm environments where the `threading` module is not supported. - // So we mock `anyio.to_thread.run_sync` here not to use threads. - await pyodide.runPythonAsync(` -async def mocked_anyio_to_thread_run_sync(func, *args, cancellable=False, limiter=None): - return func(*args) - -import anyio.to_thread -anyio.to_thread.run_sync = mocked_anyio_to_thread_run_sync - `); - console.debug("Async libraries are mocked."); - - console.debug("Setting matplotlib backend."); - updateProgress("Setting matplotlib backend"); - // Ref: https://github.com/streamlit/streamlit/blob/1.22.0/lib/streamlit/web/bootstrap.py#L111 - // This backend setting is required to use matplotlib in Wasm environment. 
- await pyodide.runPythonAsync(` -import matplotlib -matplotlib.use("agg") -`); - console.debug("matplotlib backend is set."); - - console.debug("Setting up Python utility functions."); - updateProgress("Setting up Python utility functions"); - await pyodide.runPythonAsync(scriptRunnerPySource); - run_script = pyodide.globals.get("_run_script"); - await pyodide.runPythonAsync(unloadModulesPySource); - unload_local_modules = pyodide.globals.get("unload_local_modules"); - console.debug("Python utility functions are set up."); - - updateProgress("Initialization completed"); -} - -self.onmessage = async (event: MessageEvent): Promise => { - const msg = event.data; - console.debug("worker.onmessage", msg); - - const messagePort = event.ports[0]; - - try { - if (msg.type === "init") { - pyodideReadyPromise = loadPyodideAndPackages(msg.data); - - pyodideReadyPromise - .then(() => { - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: null - }; - messagePort.postMessage(replyMessage); - }) - .catch((error) => { - const replyMessage: ReplyMessageError = { - type: "reply:error", - error - }; - messagePort.postMessage(replyMessage); - }); - return; - } - - if (pyodideReadyPromise == null) { - throw new Error("Pyodide Initialization is not started."); - } - - await pyodideReadyPromise; - - switch (msg.type) { - case "echo": { - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: msg.data - }; - messagePort.postMessage(replyMessage); - break; - } - case "run-python-code": { - unload_local_modules(); - - await pyodide.runPythonAsync(msg.data.code); - - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: null // We don't send back the execution result because it's not needed for our purpose, and sometimes the result is of type `pyodide.ffi.PyProxy` which cannot be cloned across threads and causes an error. - }; - messagePort.postMessage(replyMessage); - break; - } - case "run-python-file": { - unload_local_modules(); - - await run_script(msg.data.path); - - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: null - }; - messagePort.postMessage(replyMessage); - break; - } - case "http-request": { - const request = msg.data.request; - const response = await makeHttpRequest(call_asgi_app_from_js, request); - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: { - response - } - }; - messagePort.postMessage(replyMessage); - break; - } - case "websocket": { - const { path } = msg.data; - - console.debug("Initialize a WebSocket connection: ", { path }); - initWebSocket(call_asgi_app_from_js, path, messagePort); // This promise is not awaited because it won't resolves until the WebSocket connection is closed. 
- break; - } - case "file:write": { - const { path, data: fileData, opts } = msg.data; - - console.debug(`Write a file "${path}"`); - writeFileWithParents(pyodide, path, fileData, opts); - - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: null - }; - messagePort.postMessage(replyMessage); - break; - } - case "file:rename": { - const { oldPath, newPath } = msg.data; - - console.debug(`Rename "${oldPath}" to ${newPath}`); - renameWithParents(pyodide, oldPath, newPath); - - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: null - }; - messagePort.postMessage(replyMessage); - break; - } - case "file:unlink": { - const { path } = msg.data; - - console.debug(`Remove "${path}`); - pyodide.FS.unlink(path); - - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: null - }; - messagePort.postMessage(replyMessage); - break; - } - case "install": { - const { requirements } = msg.data; - - const micropip = pyodide.pyimport("micropip"); - - console.debug("Install the requirements:", requirements); - verifyRequirements(requirements); // Blocks the not allowed wheel URL schemes. - await micropip.install - .callKwargs(requirements, { keep_going: true }) - .then(() => { - if (requirements.includes("matplotlib")) { - // Ref: https://github.com/streamlit/streamlit/blob/1.22.0/lib/streamlit/web/bootstrap.py#L111 - // This backend setting is required to use matplotlib in Wasm environment. - return pyodide.runPythonAsync(` - import matplotlib - matplotlib.use("agg") - `); - } - }) - .then(() => { - console.debug("Successfully installed"); - - const replyMessage: ReplyMessageSuccess = { - type: "reply:success", - data: null - }; - messagePort.postMessage(replyMessage); - }); - break; - } - } - } catch (error) { - console.error(error); - - if (!(error instanceof Error)) { - throw error; - } - - // The `error` object may contain non-serializable properties such as function (for example Pyodide.FS.ErrnoError which has a `.setErrno` function), - // so it must be converted to a plain object before sending it to the main thread. - // Otherwise, the following error will be thrown: - // `Uncaught (in promise) DOMException: Failed to execute 'postMessage' on 'MessagePort': # could not be cloned.` - // Also, the JSON.stringify() and JSON.parse() approach like https://stackoverflow.com/a/42376465/13103190 - // does not work for Error objects because the Error object is not enumerable. - // So we use the following approach to clone the Error object. 
- const cloneableError = new Error(error.message); - cloneableError.name = error.name; - cloneableError.stack = error.stack; - - const replyMessage: ReplyMessageError = { - type: "reply:error", - error: cloneableError - }; - messagePort.postMessage(replyMessage); - } -}; diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-4c24bf1c.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-4c24bf1c.js deleted file mode 100644 index cbcbb6607b264b390249dea8a58d1d491390ef83..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-4c24bf1c.js +++ /dev/null @@ -1,2 +0,0 @@ -import{a as k}from"./Button-8eeccca1.js";import"./Index-c74a8b7c.js";import"./index-50ad4c77.js";import"./svelte/svelte.js";const{SvelteComponent:w,create_component:z,destroy_component:B,detach:q,init:C,insert:I,mount_component:S,safe_not_equal:j,set_data:A,text:D,transition_in:E,transition_out:F}=window.__gradio__svelte__internal;function G(a){let i=a[11].i18n(a[3])+"",l;return{c(){l=D(i)},m(e,t){I(e,l,t)},p(e,t){t&2056&&i!==(i=e[11].i18n(e[3])+"")&&A(l,i)},d(e){e&&q(l)}}}function H(a){let i,l;return i=new k({props:{value:a[3],variant:a[4],elem_id:a[0],elem_classes:a[1],size:a[6],scale:a[7],link:a[9],icon:a[8],min_width:a[10],visible:a[2],root:a[12],proxy_url:a[13],disabled:!a[5],$$slots:{default:[G]},$$scope:{ctx:a}}}),i.$on("click",a[14]),{c(){z(i.$$.fragment)},m(e,t){S(i,e,t),l=!0},p(e,[t]){const f={};t&8&&(f.value=e[3]),t&16&&(f.variant=e[4]),t&1&&(f.elem_id=e[0]),t&2&&(f.elem_classes=e[1]),t&64&&(f.size=e[6]),t&128&&(f.scale=e[7]),t&512&&(f.link=e[9]),t&256&&(f.icon=e[8]),t&1024&&(f.min_width=e[10]),t&4&&(f.visible=e[2]),t&4096&&(f.root=e[12]),t&8192&&(f.proxy_url=e[13]),t&32&&(f.disabled=!e[5]),t&34824&&(f.$$scope={dirty:t,ctx:e}),i.$set(f)},i(e){l||(E(i.$$.fragment,e),l=!0)},o(e){F(i.$$.fragment,e),l=!1},d(e){B(i,e)}}}function J(a,i,l){let{elem_id:e=""}=i,{elem_classes:t=[]}=i,{visible:f=!0}=i,{value:u}=i,{variant:m="secondary"}=i,{interactive:s}=i,{size:o="lg"}=i,{scale:c=null}=i,{icon:r=null}=i,{link:v=null}=i,{min_width:g=void 0}=i,{gradio:_}=i,{root:h=""}=i,{proxy_url:b=null}=i;const d=()=>_.dispatch("click");return a.$$set=n=>{"elem_id"in n&&l(0,e=n.elem_id),"elem_classes"in n&&l(1,t=n.elem_classes),"visible"in n&&l(2,f=n.visible),"value"in n&&l(3,u=n.value),"variant"in n&&l(4,m=n.variant),"interactive"in n&&l(5,s=n.interactive),"size"in n&&l(6,o=n.size),"scale"in n&&l(7,c=n.scale),"icon"in n&&l(8,r=n.icon),"link"in n&&l(9,v=n.link),"min_width"in n&&l(10,g=n.min_width),"gradio"in n&&l(11,_=n.gradio),"root"in n&&l(12,h=n.root),"proxy_url"in n&&l(13,b=n.proxy_url)},[e,t,f,u,m,s,o,c,r,v,g,_,h,b,d]}class O extends w{constructor(i){super(),C(this,i,J,H,j,{elem_id:0,elem_classes:1,visible:2,value:3,variant:4,interactive:5,size:6,scale:7,icon:8,link:9,min_width:10,gradio:11,root:12,proxy_url:13})}}export{k as BaseButton,O as default}; -//# sourceMappingURL=Index-4c24bf1c.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/projections/polar.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/projections/polar.py deleted file mode 100644 index 0bff320e57289f807f377c7a31532d2fecdb9edf..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/projections/polar.py +++ /dev/null @@ -1,1536 +0,0 
@@ -import math -import types - -import numpy as np - -import matplotlib as mpl -from matplotlib import _api, cbook -from matplotlib.axes import Axes -import matplotlib.axis as maxis -import matplotlib.markers as mmarkers -import matplotlib.patches as mpatches -from matplotlib.path import Path -import matplotlib.ticker as mticker -import matplotlib.transforms as mtransforms -from matplotlib.spines import Spine - - -class PolarTransform(mtransforms.Transform): - r""" - The base polar transform. - - This transform maps polar coordinates :math:`\theta, r` into Cartesian - coordinates :math:`x, y = r \cos(\theta), r \sin(\theta)` - (but does not fully transform into Axes coordinates or - handle positioning in screen space). - - This transformation is designed to be applied to data after any scaling - along the radial axis (e.g. log-scaling) has been applied to the input - data. - - Path segments at a fixed radius are automatically transformed to circular - arcs as long as ``path._interpolation_steps > 1``. - """ - - input_dims = output_dims = 2 - - def __init__(self, axis=None, use_rmin=True, - _apply_theta_transforms=True, *, scale_transform=None): - """ - Parameters - ---------- - axis : `~matplotlib.axis.Axis`, optional - Axis associated with this transform. This is used to get the - minimum radial limit. - use_rmin : `bool`, optional - If ``True``, subtract the minimum radial axis limit before - transforming to Cartesian coordinates. *axis* must also be - specified for this to take effect. - """ - super().__init__() - self._axis = axis - self._use_rmin = use_rmin - self._apply_theta_transforms = _apply_theta_transforms - self._scale_transform = scale_transform - - __str__ = mtransforms._make_str_method( - "_axis", - use_rmin="_use_rmin", - _apply_theta_transforms="_apply_theta_transforms") - - def _get_rorigin(self): - # Get lower r limit after being scaled by the radial scale transform - return self._scale_transform.transform( - (0, self._axis.get_rorigin()))[1] - - @_api.rename_parameter("3.8", "tr", "values") - def transform_non_affine(self, values): - # docstring inherited - theta, r = np.transpose(values) - # PolarAxes does not use the theta transforms here, but apply them for - # backwards-compatibility if not being used by it. - if self._apply_theta_transforms and self._axis is not None: - theta *= self._axis.get_theta_direction() - theta += self._axis.get_theta_offset() - if self._use_rmin and self._axis is not None: - r = (r - self._get_rorigin()) * self._axis.get_rsign() - r = np.where(r >= 0, r, np.nan) - return np.column_stack([r * np.cos(theta), r * np.sin(theta)]) - - def transform_path_non_affine(self, path): - # docstring inherited - if not len(path) or path._interpolation_steps == 1: - return Path(self.transform_non_affine(path.vertices), path.codes) - xys = [] - codes = [] - last_t = last_r = None - for trs, c in path.iter_segments(): - trs = trs.reshape((-1, 2)) - if c == Path.LINETO: - (t, r), = trs - if t == last_t: # Same angle: draw a straight line. - xys.extend(self.transform_non_affine(trs)) - codes.append(Path.LINETO) - elif r == last_r: # Same radius: draw an arc. - # The following is complicated by Path.arc() being - # "helpful" and unwrapping the angles, but we don't want - # that behavior here. 
- last_td, td = np.rad2deg([last_t, t]) - if self._use_rmin and self._axis is not None: - r = ((r - self._get_rorigin()) - * self._axis.get_rsign()) - if last_td <= td: - while td - last_td > 360: - arc = Path.arc(last_td, last_td + 360) - xys.extend(arc.vertices[1:] * r) - codes.extend(arc.codes[1:]) - last_td += 360 - arc = Path.arc(last_td, td) - xys.extend(arc.vertices[1:] * r) - codes.extend(arc.codes[1:]) - else: - # The reverse version also relies on the fact that all - # codes but the first one are the same. - while last_td - td > 360: - arc = Path.arc(last_td - 360, last_td) - xys.extend(arc.vertices[::-1][1:] * r) - codes.extend(arc.codes[1:]) - last_td -= 360 - arc = Path.arc(td, last_td) - xys.extend(arc.vertices[::-1][1:] * r) - codes.extend(arc.codes[1:]) - else: # Interpolate. - trs = cbook.simple_linear_interpolation( - np.vstack([(last_t, last_r), trs]), - path._interpolation_steps)[1:] - xys.extend(self.transform_non_affine(trs)) - codes.extend([Path.LINETO] * len(trs)) - else: # Not a straight line. - xys.extend(self.transform_non_affine(trs)) - codes.extend([c] * len(trs)) - last_t, last_r = trs[-1] - return Path(xys, codes) - - def inverted(self): - # docstring inherited - return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin, - self._apply_theta_transforms) - - -class PolarAffine(mtransforms.Affine2DBase): - r""" - The affine part of the polar projection. - - Scales the output so that maximum radius rests on the edge of the axes - circle and the origin is mapped to (0.5, 0.5). The transform applied is - the same to x and y components and given by: - - .. math:: - - x_{1} = 0.5 \left [ \frac{x_{0}}{(r_{\max} - r_{\min})} + 1 \right ] - - :math:`r_{\min}, r_{\max}` are the minimum and maximum radial limits after - any scaling (e.g. log scaling) has been removed. - """ - def __init__(self, scale_transform, limits): - """ - Parameters - ---------- - scale_transform : `~matplotlib.transforms.Transform` - Scaling transform for the data. This is used to remove any scaling - from the radial view limits. - limits : `~matplotlib.transforms.BboxBase` - View limits of the data. The only part of its bounds that is used - is the y limits (for the radius limits). - """ - super().__init__() - self._scale_transform = scale_transform - self._limits = limits - self.set_children(scale_transform, limits) - self._mtx = None - - __str__ = mtransforms._make_str_method("_scale_transform", "_limits") - - def get_matrix(self): - # docstring inherited - if self._invalid: - limits_scaled = self._limits.transformed(self._scale_transform) - yscale = limits_scaled.ymax - limits_scaled.ymin - affine = mtransforms.Affine2D() \ - .scale(0.5 / yscale) \ - .translate(0.5, 0.5) - self._mtx = affine.get_matrix() - self._inverted = None - self._invalid = 0 - return self._mtx - - -class InvertedPolarTransform(mtransforms.Transform): - """ - The inverse of the polar transform, mapping Cartesian - coordinate space *x* and *y* back to *theta* and *r*. - """ - input_dims = output_dims = 2 - - def __init__(self, axis=None, use_rmin=True, - _apply_theta_transforms=True): - """ - Parameters - ---------- - axis : `~matplotlib.axis.Axis`, optional - Axis associated with this transform. This is used to get the - minimum radial limit. - use_rmin : `bool`, optional - If ``True`` add the minimum radial axis limit after - transforming from Cartesian coordinates. *axis* must also be - specified for this to take effect. 
- """ - super().__init__() - self._axis = axis - self._use_rmin = use_rmin - self._apply_theta_transforms = _apply_theta_transforms - - __str__ = mtransforms._make_str_method( - "_axis", - use_rmin="_use_rmin", - _apply_theta_transforms="_apply_theta_transforms") - - @_api.rename_parameter("3.8", "xy", "values") - def transform_non_affine(self, values): - # docstring inherited - x, y = values.T - r = np.hypot(x, y) - theta = (np.arctan2(y, x) + 2 * np.pi) % (2 * np.pi) - # PolarAxes does not use the theta transforms here, but apply them for - # backwards-compatibility if not being used by it. - if self._apply_theta_transforms and self._axis is not None: - theta -= self._axis.get_theta_offset() - theta *= self._axis.get_theta_direction() - theta %= 2 * np.pi - if self._use_rmin and self._axis is not None: - r += self._axis.get_rorigin() - r *= self._axis.get_rsign() - return np.column_stack([theta, r]) - - def inverted(self): - # docstring inherited - return PolarAxes.PolarTransform(self._axis, self._use_rmin, - self._apply_theta_transforms) - - -class ThetaFormatter(mticker.Formatter): - """ - Used to format the *theta* tick labels. Converts the native - unit of radians into degrees and adds a degree symbol. - """ - - def __call__(self, x, pos=None): - vmin, vmax = self.axis.get_view_interval() - d = np.rad2deg(abs(vmax - vmin)) - digits = max(-int(np.log10(d) - 1.5), 0) - # Use Unicode rather than mathtext with \circ, so that it will work - # correctly with any arbitrary font (assuming it has a degree sign), - # whereas $5\circ$ will only work correctly with one of the supported - # math fonts (Computer Modern and STIX). - return f"{np.rad2deg(x):0.{digits:d}f}\N{DEGREE SIGN}" - - -class _AxisWrapper: - def __init__(self, axis): - self._axis = axis - - def get_view_interval(self): - return np.rad2deg(self._axis.get_view_interval()) - - def set_view_interval(self, vmin, vmax): - self._axis.set_view_interval(*np.deg2rad((vmin, vmax))) - - def get_minpos(self): - return np.rad2deg(self._axis.get_minpos()) - - def get_data_interval(self): - return np.rad2deg(self._axis.get_data_interval()) - - def set_data_interval(self, vmin, vmax): - self._axis.set_data_interval(*np.deg2rad((vmin, vmax))) - - def get_tick_space(self): - return self._axis.get_tick_space() - - -class ThetaLocator(mticker.Locator): - """ - Used to locate theta ticks. - - This will work the same as the base locator except in the case that the - view spans the entire circle. In such cases, the previously used default - locations of every 45 degrees are returned. - """ - - def __init__(self, base): - self.base = base - self.axis = self.base.axis = _AxisWrapper(self.base.axis) - - def set_axis(self, axis): - self.axis = _AxisWrapper(axis) - self.base.set_axis(self.axis) - - def __call__(self): - lim = self.axis.get_view_interval() - if _is_full_circle_deg(lim[0], lim[1]): - return np.arange(8) * 2 * np.pi / 8 - else: - return np.deg2rad(self.base()) - - def view_limits(self, vmin, vmax): - vmin, vmax = np.rad2deg((vmin, vmax)) - return np.deg2rad(self.base.view_limits(vmin, vmax)) - - -class ThetaTick(maxis.XTick): - """ - A theta-axis tick. - - This subclass of `.XTick` provides angular ticks with some small - modification to their re-positioning such that ticks are rotated based on - tick location. This results in ticks that are correctly perpendicular to - the arc spine. - - When 'auto' rotation is enabled, labels are also rotated to be parallel to - the spine. 
The label padding is also applied here since it's not possible - to use a generic axes transform to produce tick-specific padding. - """ - - def __init__(self, axes, *args, **kwargs): - self._text1_translate = mtransforms.ScaledTranslation( - 0, 0, axes.figure.dpi_scale_trans) - self._text2_translate = mtransforms.ScaledTranslation( - 0, 0, axes.figure.dpi_scale_trans) - super().__init__(axes, *args, **kwargs) - self.label1.set( - rotation_mode='anchor', - transform=self.label1.get_transform() + self._text1_translate) - self.label2.set( - rotation_mode='anchor', - transform=self.label2.get_transform() + self._text2_translate) - - def _apply_params(self, **kwargs): - super()._apply_params(**kwargs) - # Ensure transform is correct; sometimes this gets reset. - trans = self.label1.get_transform() - if not trans.contains_branch(self._text1_translate): - self.label1.set_transform(trans + self._text1_translate) - trans = self.label2.get_transform() - if not trans.contains_branch(self._text2_translate): - self.label2.set_transform(trans + self._text2_translate) - - def _update_padding(self, pad, angle): - padx = pad * np.cos(angle) / 72 - pady = pad * np.sin(angle) / 72 - self._text1_translate._t = (padx, pady) - self._text1_translate.invalidate() - self._text2_translate._t = (-padx, -pady) - self._text2_translate.invalidate() - - def update_position(self, loc): - super().update_position(loc) - axes = self.axes - angle = loc * axes.get_theta_direction() + axes.get_theta_offset() - text_angle = np.rad2deg(angle) % 360 - 90 - angle -= np.pi / 2 - - marker = self.tick1line.get_marker() - if marker in (mmarkers.TICKUP, '|'): - trans = mtransforms.Affine2D().scale(1, 1).rotate(angle) - elif marker == mmarkers.TICKDOWN: - trans = mtransforms.Affine2D().scale(1, -1).rotate(angle) - else: - # Don't modify custom tick line markers. - trans = self.tick1line._marker._transform - self.tick1line._marker._transform = trans - - marker = self.tick2line.get_marker() - if marker in (mmarkers.TICKUP, '|'): - trans = mtransforms.Affine2D().scale(1, 1).rotate(angle) - elif marker == mmarkers.TICKDOWN: - trans = mtransforms.Affine2D().scale(1, -1).rotate(angle) - else: - # Don't modify custom tick line markers. - trans = self.tick2line._marker._transform - self.tick2line._marker._transform = trans - - mode, user_angle = self._labelrotation - if mode == 'default': - text_angle = user_angle - else: - if text_angle > 90: - text_angle -= 180 - elif text_angle < -90: - text_angle += 180 - text_angle += user_angle - self.label1.set_rotation(text_angle) - self.label2.set_rotation(text_angle) - - # This extra padding helps preserve the look from previous releases but - # is also needed because labels are anchored to their center. - pad = self._pad + 7 - self._update_padding(pad, - self._loc * axes.get_theta_direction() + - axes.get_theta_offset()) - - -class ThetaAxis(maxis.XAxis): - """ - A theta Axis. - - This overrides certain properties of an `.XAxis` to provide special-casing - for an angular axis. - """ - __name__ = 'thetaaxis' - axis_name = 'theta' #: Read-only name identifying the axis. 
- _tick_class = ThetaTick - - def _wrap_locator_formatter(self): - self.set_major_locator(ThetaLocator(self.get_major_locator())) - self.set_major_formatter(ThetaFormatter()) - self.isDefault_majloc = True - self.isDefault_majfmt = True - - def clear(self): - # docstring inherited - super().clear() - self.set_ticks_position('none') - self._wrap_locator_formatter() - - def _set_scale(self, value, **kwargs): - if value != 'linear': - raise NotImplementedError( - "The xscale cannot be set on a polar plot") - super()._set_scale(value, **kwargs) - # LinearScale.set_default_locators_and_formatters just set the major - # locator to be an AutoLocator, so we customize it here to have ticks - # at sensible degree multiples. - self.get_major_locator().set_params(steps=[1, 1.5, 3, 4.5, 9, 10]) - self._wrap_locator_formatter() - - def _copy_tick_props(self, src, dest): - """Copy the props from src tick to dest tick.""" - if src is None or dest is None: - return - super()._copy_tick_props(src, dest) - - # Ensure that tick transforms are independent so that padding works. - trans = dest._get_text1_transform()[0] - dest.label1.set_transform(trans + dest._text1_translate) - trans = dest._get_text2_transform()[0] - dest.label2.set_transform(trans + dest._text2_translate) - - -class RadialLocator(mticker.Locator): - """ - Used to locate radius ticks. - - Ensures that all ticks are strictly positive. For all other tasks, it - delegates to the base `.Locator` (which may be different depending on the - scale of the *r*-axis). - """ - - def __init__(self, base, axes=None): - self.base = base - self._axes = axes - - def set_axis(self, axis): - self.base.set_axis(axis) - - def __call__(self): - # Ensure previous behaviour with full circle non-annular views. - if self._axes: - if _is_full_circle_rad(*self._axes.viewLim.intervalx): - rorigin = self._axes.get_rorigin() * self._axes.get_rsign() - if self._axes.get_rmin() <= rorigin: - return [tick for tick in self.base() if tick > rorigin] - return self.base() - - def _zero_in_bounds(self): - """ - Return True if zero is within the valid values for the - scale of the radial axis. - """ - vmin, vmax = self._axes.yaxis._scale.limit_range_for_scale(0, 1, 1e-5) - return vmin == 0 - - def nonsingular(self, vmin, vmax): - # docstring inherited - if self._zero_in_bounds() and (vmin, vmax) == (-np.inf, np.inf): - # Initial view limits - return (0, 1) - else: - return self.base.nonsingular(vmin, vmax) - - def view_limits(self, vmin, vmax): - vmin, vmax = self.base.view_limits(vmin, vmax) - if self._zero_in_bounds() and vmax > vmin: - # this allows inverted r/y-lims - vmin = min(0, vmin) - return mtransforms.nonsingular(vmin, vmax) - - -class _ThetaShift(mtransforms.ScaledTranslation): - """ - Apply a padding shift based on axes theta limits. - - This is used to create padding for radial ticks. - - Parameters - ---------- - axes : `~matplotlib.axes.Axes` - The owning axes; used to determine limits. - pad : float - The padding to apply, in points. - mode : {'min', 'max', 'rlabel'} - Whether to shift away from the start (``'min'``) or the end (``'max'``) - of the axes, or using the rlabel position (``'rlabel'``). 
- """ - def __init__(self, axes, pad, mode): - super().__init__(pad, pad, axes.figure.dpi_scale_trans) - self.set_children(axes._realViewLim) - self.axes = axes - self.mode = mode - self.pad = pad - - __str__ = mtransforms._make_str_method("axes", "pad", "mode") - - def get_matrix(self): - if self._invalid: - if self.mode == 'rlabel': - angle = ( - np.deg2rad(self.axes.get_rlabel_position()) * - self.axes.get_theta_direction() + - self.axes.get_theta_offset() - ) - else: - if self.mode == 'min': - angle = self.axes._realViewLim.xmin - elif self.mode == 'max': - angle = self.axes._realViewLim.xmax - - if self.mode in ('rlabel', 'min'): - padx = np.cos(angle - np.pi / 2) - pady = np.sin(angle - np.pi / 2) - else: - padx = np.cos(angle + np.pi / 2) - pady = np.sin(angle + np.pi / 2) - - self._t = (self.pad * padx / 72, self.pad * pady / 72) - return super().get_matrix() - - -class RadialTick(maxis.YTick): - """ - A radial-axis tick. - - This subclass of `.YTick` provides radial ticks with some small - modification to their re-positioning such that ticks are rotated based on - axes limits. This results in ticks that are correctly perpendicular to - the spine. Labels are also rotated to be perpendicular to the spine, when - 'auto' rotation is enabled. - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.label1.set_rotation_mode('anchor') - self.label2.set_rotation_mode('anchor') - - def _determine_anchor(self, mode, angle, start): - # Note: angle is the (spine angle - 90) because it's used for the tick - # & text setup, so all numbers below are -90 from (normed) spine angle. - if mode == 'auto': - if start: - if -90 <= angle <= 90: - return 'left', 'center' - else: - return 'right', 'center' - else: - if -90 <= angle <= 90: - return 'right', 'center' - else: - return 'left', 'center' - else: - if start: - if angle < -68.5: - return 'center', 'top' - elif angle < -23.5: - return 'left', 'top' - elif angle < 22.5: - return 'left', 'center' - elif angle < 67.5: - return 'left', 'bottom' - elif angle < 112.5: - return 'center', 'bottom' - elif angle < 157.5: - return 'right', 'bottom' - elif angle < 202.5: - return 'right', 'center' - elif angle < 247.5: - return 'right', 'top' - else: - return 'center', 'top' - else: - if angle < -68.5: - return 'center', 'bottom' - elif angle < -23.5: - return 'right', 'bottom' - elif angle < 22.5: - return 'right', 'center' - elif angle < 67.5: - return 'right', 'top' - elif angle < 112.5: - return 'center', 'top' - elif angle < 157.5: - return 'left', 'top' - elif angle < 202.5: - return 'left', 'center' - elif angle < 247.5: - return 'left', 'bottom' - else: - return 'center', 'bottom' - - def update_position(self, loc): - super().update_position(loc) - axes = self.axes - thetamin = axes.get_thetamin() - thetamax = axes.get_thetamax() - direction = axes.get_theta_direction() - offset_rad = axes.get_theta_offset() - offset = np.rad2deg(offset_rad) - full = _is_full_circle_deg(thetamin, thetamax) - - if full: - angle = (axes.get_rlabel_position() * direction + - offset) % 360 - 90 - tick_angle = 0 - else: - angle = (thetamin * direction + offset) % 360 - 90 - if direction > 0: - tick_angle = np.deg2rad(angle) - else: - tick_angle = np.deg2rad(angle + 180) - text_angle = (angle + 90) % 180 - 90 # between -90 and +90. 
- mode, user_angle = self._labelrotation - if mode == 'auto': - text_angle += user_angle - else: - text_angle = user_angle - - if full: - ha = self.label1.get_horizontalalignment() - va = self.label1.get_verticalalignment() - else: - ha, va = self._determine_anchor(mode, angle, direction > 0) - self.label1.set_horizontalalignment(ha) - self.label1.set_verticalalignment(va) - self.label1.set_rotation(text_angle) - - marker = self.tick1line.get_marker() - if marker == mmarkers.TICKLEFT: - trans = mtransforms.Affine2D().rotate(tick_angle) - elif marker == '_': - trans = mtransforms.Affine2D().rotate(tick_angle + np.pi / 2) - elif marker == mmarkers.TICKRIGHT: - trans = mtransforms.Affine2D().scale(-1, 1).rotate(tick_angle) - else: - # Don't modify custom tick line markers. - trans = self.tick1line._marker._transform - self.tick1line._marker._transform = trans - - if full: - self.label2.set_visible(False) - self.tick2line.set_visible(False) - angle = (thetamax * direction + offset) % 360 - 90 - if direction > 0: - tick_angle = np.deg2rad(angle) - else: - tick_angle = np.deg2rad(angle + 180) - text_angle = (angle + 90) % 180 - 90 # between -90 and +90. - mode, user_angle = self._labelrotation - if mode == 'auto': - text_angle += user_angle - else: - text_angle = user_angle - - ha, va = self._determine_anchor(mode, angle, direction < 0) - self.label2.set_ha(ha) - self.label2.set_va(va) - self.label2.set_rotation(text_angle) - - marker = self.tick2line.get_marker() - if marker == mmarkers.TICKLEFT: - trans = mtransforms.Affine2D().rotate(tick_angle) - elif marker == '_': - trans = mtransforms.Affine2D().rotate(tick_angle + np.pi / 2) - elif marker == mmarkers.TICKRIGHT: - trans = mtransforms.Affine2D().scale(-1, 1).rotate(tick_angle) - else: - # Don't modify custom tick line markers. - trans = self.tick2line._marker._transform - self.tick2line._marker._transform = trans - - -class RadialAxis(maxis.YAxis): - """ - A radial Axis. - - This overrides certain properties of a `.YAxis` to provide special-casing - for a radial axis. - """ - __name__ = 'radialaxis' - axis_name = 'radius' #: Read-only name identifying the axis. - _tick_class = RadialTick - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.sticky_edges.y.append(0) - - def _wrap_locator_formatter(self): - self.set_major_locator(RadialLocator(self.get_major_locator(), - self.axes)) - self.isDefault_majloc = True - - def clear(self): - # docstring inherited - super().clear() - self.set_ticks_position('none') - self._wrap_locator_formatter() - - def _set_scale(self, value, **kwargs): - super()._set_scale(value, **kwargs) - self._wrap_locator_formatter() - - -def _is_full_circle_deg(thetamin, thetamax): - """ - Determine if a wedge (in degrees) spans the full circle. - - The condition is derived from :class:`~matplotlib.patches.Wedge`. - """ - return abs(abs(thetamax - thetamin) - 360.0) < 1e-12 - - -def _is_full_circle_rad(thetamin, thetamax): - """ - Determine if a wedge (in radians) spans the full circle. - - The condition is derived from :class:`~matplotlib.patches.Wedge`. - """ - return abs(abs(thetamax - thetamin) - 2 * np.pi) < 1.74e-14 - - -class _WedgeBbox(mtransforms.Bbox): - """ - Transform (theta, r) wedge Bbox into axes bounding box. 
- - Parameters - ---------- - center : (float, float) - Center of the wedge - viewLim : `~matplotlib.transforms.Bbox` - Bbox determining the boundaries of the wedge - originLim : `~matplotlib.transforms.Bbox` - Bbox determining the origin for the wedge, if different from *viewLim* - """ - def __init__(self, center, viewLim, originLim, **kwargs): - super().__init__([[0, 0], [1, 1]], **kwargs) - self._center = center - self._viewLim = viewLim - self._originLim = originLim - self.set_children(viewLim, originLim) - - __str__ = mtransforms._make_str_method("_center", "_viewLim", "_originLim") - - def get_points(self): - # docstring inherited - if self._invalid: - points = self._viewLim.get_points().copy() - # Scale angular limits to work with Wedge. - points[:, 0] *= 180 / np.pi - if points[0, 0] > points[1, 0]: - points[:, 0] = points[::-1, 0] - - # Scale radial limits based on origin radius. - points[:, 1] -= self._originLim.y0 - - # Scale radial limits to match axes limits. - rscale = 0.5 / points[1, 1] - points[:, 1] *= rscale - width = min(points[1, 1] - points[0, 1], 0.5) - - # Generate bounding box for wedge. - wedge = mpatches.Wedge(self._center, points[1, 1], - points[0, 0], points[1, 0], - width=width) - self.update_from_path(wedge.get_path()) - - # Ensure equal aspect ratio. - w, h = self._points[1] - self._points[0] - deltah = max(w - h, 0) / 2 - deltaw = max(h - w, 0) / 2 - self._points += np.array([[-deltaw, -deltah], [deltaw, deltah]]) - - self._invalid = 0 - - return self._points - - -class PolarAxes(Axes): - """ - A polar graph projection, where the input dimensions are *theta*, *r*. - - Theta starts pointing east and goes anti-clockwise. - """ - name = 'polar' - - def __init__(self, *args, - theta_offset=0, theta_direction=1, rlabel_position=22.5, - **kwargs): - # docstring inherited - self._default_theta_offset = theta_offset - self._default_theta_direction = theta_direction - self._default_rlabel_position = np.deg2rad(rlabel_position) - super().__init__(*args, **kwargs) - self.use_sticky_edges = True - self.set_aspect('equal', adjustable='box', anchor='C') - self.clear() - - def clear(self): - # docstring inherited - super().clear() - - self.title.set_y(1.05) - - start = self.spines.get('start', None) - if start: - start.set_visible(False) - end = self.spines.get('end', None) - if end: - end.set_visible(False) - self.set_xlim(0.0, 2 * np.pi) - - self.grid(mpl.rcParams['polaraxes.grid']) - inner = self.spines.get('inner', None) - if inner: - inner.set_visible(False) - - self.set_rorigin(None) - self.set_theta_offset(self._default_theta_offset) - self.set_theta_direction(self._default_theta_direction) - - def _init_axis(self): - # This is moved out of __init__ because non-separable axes don't use it - self.xaxis = ThetaAxis(self, clear=False) - self.yaxis = RadialAxis(self, clear=False) - self.spines['polar'].register_axis(self.yaxis) - - def _set_lim_and_transforms(self): - # A view limit where the minimum radius can be locked if the user - # specifies an alternate origin. - self._originViewLim = mtransforms.LockableBbox(self.viewLim) - - # Handle angular offset and direction. - self._direction = mtransforms.Affine2D() \ - .scale(self._default_theta_direction, 1.0) - self._theta_offset = mtransforms.Affine2D() \ - .translate(self._default_theta_offset, 0.0) - self.transShift = self._direction + self._theta_offset - # A view limit shifted to the correct location after accounting for - # orientation and offset. 
- self._realViewLim = mtransforms.TransformedBbox(self.viewLim, - self.transShift) - - # Transforms the x and y axis separately by a scale factor - # It is assumed that this part will have non-linear components - self.transScale = mtransforms.TransformWrapper( - mtransforms.IdentityTransform()) - - # Scale view limit into a bbox around the selected wedge. This may be - # smaller than the usual unit axes rectangle if not plotting the full - # circle. - self.axesLim = _WedgeBbox((0.5, 0.5), - self._realViewLim, self._originViewLim) - - # Scale the wedge to fill the axes. - self.transWedge = mtransforms.BboxTransformFrom(self.axesLim) - - # Scale the axes to fill the figure. - self.transAxes = mtransforms.BboxTransformTo(self.bbox) - - # A (possibly non-linear) projection on the (already scaled) - # data. This one is aware of rmin - self.transProjection = self.PolarTransform( - self, - _apply_theta_transforms=False, - scale_transform=self.transScale - ) - # Add dependency on rorigin. - self.transProjection.set_children(self._originViewLim) - - # An affine transformation on the data, generally to limit the - # range of the axes - self.transProjectionAffine = self.PolarAffine(self.transScale, - self._originViewLim) - - # The complete data transformation stack -- from data all the - # way to display coordinates - # - # 1. Remove any radial axis scaling (e.g. log scaling) - # 2. Shift data in the theta direction - # 3. Project the data from polar to cartesian values - # (with the origin in the same place) - # 4. Scale and translate the cartesian values to Axes coordinates - # (here the origin is moved to the lower left of the Axes) - # 5. Move and scale to fill the Axes - # 6. Convert from Axes coordinates to Figure coordinates - self.transData = ( - self.transScale + - self.transShift + - self.transProjection + - ( - self.transProjectionAffine + - self.transWedge + - self.transAxes - ) - ) - - # This is the transform for theta-axis ticks. It is - # equivalent to transData, except it always puts r == 0.0 and r == 1.0 - # at the edge of the axis circles. - self._xaxis_transform = ( - mtransforms.blended_transform_factory( - mtransforms.IdentityTransform(), - mtransforms.BboxTransformTo(self.viewLim)) + - self.transData) - # The theta labels are flipped along the radius, so that text 1 is on - # the outside by default. This should work the same as before. - flipr_transform = mtransforms.Affine2D() \ - .translate(0.0, -0.5) \ - .scale(1.0, -1.0) \ - .translate(0.0, 0.5) - self._xaxis_text_transform = flipr_transform + self._xaxis_transform - - # This is the transform for r-axis ticks. It scales the theta - # axis so the gridlines from 0.0 to 1.0, now go from thetamin to - # thetamax. 
- self._yaxis_transform = ( - mtransforms.blended_transform_factory( - mtransforms.BboxTransformTo(self.viewLim), - mtransforms.IdentityTransform()) + - self.transData) - # The r-axis labels are put at an angle and padded in the r-direction - self._r_label_position = mtransforms.Affine2D() \ - .translate(self._default_rlabel_position, 0.0) - self._yaxis_text_transform = mtransforms.TransformWrapper( - self._r_label_position + self.transData) - - def get_xaxis_transform(self, which='grid'): - _api.check_in_list(['tick1', 'tick2', 'grid'], which=which) - return self._xaxis_transform - - def get_xaxis_text1_transform(self, pad): - return self._xaxis_text_transform, 'center', 'center' - - def get_xaxis_text2_transform(self, pad): - return self._xaxis_text_transform, 'center', 'center' - - def get_yaxis_transform(self, which='grid'): - if which in ('tick1', 'tick2'): - return self._yaxis_text_transform - elif which == 'grid': - return self._yaxis_transform - else: - _api.check_in_list(['tick1', 'tick2', 'grid'], which=which) - - def get_yaxis_text1_transform(self, pad): - thetamin, thetamax = self._realViewLim.intervalx - if _is_full_circle_rad(thetamin, thetamax): - return self._yaxis_text_transform, 'bottom', 'left' - elif self.get_theta_direction() > 0: - halign = 'left' - pad_shift = _ThetaShift(self, pad, 'min') - else: - halign = 'right' - pad_shift = _ThetaShift(self, pad, 'max') - return self._yaxis_text_transform + pad_shift, 'center', halign - - def get_yaxis_text2_transform(self, pad): - if self.get_theta_direction() > 0: - halign = 'right' - pad_shift = _ThetaShift(self, pad, 'max') - else: - halign = 'left' - pad_shift = _ThetaShift(self, pad, 'min') - return self._yaxis_text_transform + pad_shift, 'center', halign - - def draw(self, renderer): - self._unstale_viewLim() - thetamin, thetamax = np.rad2deg(self._realViewLim.intervalx) - if thetamin > thetamax: - thetamin, thetamax = thetamax, thetamin - rmin, rmax = ((self._realViewLim.intervaly - self.get_rorigin()) * - self.get_rsign()) - if isinstance(self.patch, mpatches.Wedge): - # Backwards-compatibility: Any subclassed Axes might override the - # patch to not be the Wedge that PolarAxes uses. - center = self.transWedge.transform((0.5, 0.5)) - self.patch.set_center(center) - self.patch.set_theta1(thetamin) - self.patch.set_theta2(thetamax) - - edge, _ = self.transWedge.transform((1, 0)) - radius = edge - center[0] - width = min(radius * (rmax - rmin) / rmax, radius) - self.patch.set_radius(radius) - self.patch.set_width(width) - - inner_width = radius - width - inner = self.spines.get('inner', None) - if inner: - inner.set_visible(inner_width != 0.0) - - visible = not _is_full_circle_deg(thetamin, thetamax) - # For backwards compatibility, any subclassed Axes might override the - # spines to not include start/end that PolarAxes uses. 
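Editorial aside, not part of the deleted file: `PolarAxes` registers its spines under the names `'polar'`, `'inner'`, `'start'` and `'end'` (see `_gen_axes_spines` further down), and the drawing code below toggles their visibility each frame. A hedged example of how user code touches the same spines, assuming a standard Matplotlib install:

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.spines['polar'].set_visible(False)   # hide the outer circular frame
ax.grid(True)
plt.show()
```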
- start = self.spines.get('start', None) - end = self.spines.get('end', None) - if start: - start.set_visible(visible) - if end: - end.set_visible(visible) - if visible: - yaxis_text_transform = self._yaxis_transform - else: - yaxis_text_transform = self._r_label_position + self.transData - if self._yaxis_text_transform != yaxis_text_transform: - self._yaxis_text_transform.set(yaxis_text_transform) - self.yaxis.reset_ticks() - self.yaxis.set_clip_path(self.patch) - - super().draw(renderer) - - def _gen_axes_patch(self): - return mpatches.Wedge((0.5, 0.5), 0.5, 0.0, 360.0) - - def _gen_axes_spines(self): - spines = { - 'polar': Spine.arc_spine(self, 'top', (0.5, 0.5), 0.5, 0, 360), - 'start': Spine.linear_spine(self, 'left'), - 'end': Spine.linear_spine(self, 'right'), - 'inner': Spine.arc_spine(self, 'bottom', (0.5, 0.5), 0.0, 0, 360), - } - spines['polar'].set_transform(self.transWedge + self.transAxes) - spines['inner'].set_transform(self.transWedge + self.transAxes) - spines['start'].set_transform(self._yaxis_transform) - spines['end'].set_transform(self._yaxis_transform) - return spines - - def set_thetamax(self, thetamax): - """Set the maximum theta limit in degrees.""" - self.viewLim.x1 = np.deg2rad(thetamax) - - def get_thetamax(self): - """Return the maximum theta limit in degrees.""" - return np.rad2deg(self.viewLim.xmax) - - def set_thetamin(self, thetamin): - """Set the minimum theta limit in degrees.""" - self.viewLim.x0 = np.deg2rad(thetamin) - - def get_thetamin(self): - """Get the minimum theta limit in degrees.""" - return np.rad2deg(self.viewLim.xmin) - - def set_thetalim(self, *args, **kwargs): - r""" - Set the minimum and maximum theta values. - - Can take the following signatures: - - - ``set_thetalim(minval, maxval)``: Set the limits in radians. - - ``set_thetalim(thetamin=minval, thetamax=maxval)``: Set the limits - in degrees. - - where minval and maxval are the minimum and maximum limits. Values are - wrapped in to the range :math:`[0, 2\pi]` (in radians), so for example - it is possible to do ``set_thetalim(-np.pi / 2, np.pi / 2)`` to have - an axis symmetric around 0. A ValueError is raised if the absolute - angle difference is larger than a full circle. - """ - orig_lim = self.get_xlim() # in radians - if 'thetamin' in kwargs: - kwargs['xmin'] = np.deg2rad(kwargs.pop('thetamin')) - if 'thetamax' in kwargs: - kwargs['xmax'] = np.deg2rad(kwargs.pop('thetamax')) - new_min, new_max = self.set_xlim(*args, **kwargs) - # Parsing all permutations of *args, **kwargs is tricky; it is simpler - # to let set_xlim() do it and then validate the limits. - if abs(new_max - new_min) > 2 * np.pi: - self.set_xlim(orig_lim) # un-accept the change - raise ValueError("The angle range must be less than a full circle") - return tuple(np.rad2deg((new_min, new_max))) - - def set_theta_offset(self, offset): - """ - Set the offset for the location of 0 in radians. - """ - mtx = self._theta_offset.get_matrix() - mtx[0, 2] = offset - self._theta_offset.invalidate() - - def get_theta_offset(self): - """ - Get the offset for the location of 0 in radians. - """ - return self._theta_offset.get_matrix()[0, 2] - - def set_theta_zero_location(self, loc, offset=0.0): - """ - Set the location of theta's zero. - - This simply calls `set_theta_offset` with the correct value in radians. - - Parameters - ---------- - loc : str - May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE". - offset : float, default: 0 - An offset in degrees to apply from the specified *loc*. 
**Note:** - this offset is *always* applied counter-clockwise regardless of - the direction setting. - """ - mapping = { - 'N': np.pi * 0.5, - 'NW': np.pi * 0.75, - 'W': np.pi, - 'SW': np.pi * 1.25, - 'S': np.pi * 1.5, - 'SE': np.pi * 1.75, - 'E': 0, - 'NE': np.pi * 0.25} - return self.set_theta_offset(mapping[loc] + np.deg2rad(offset)) - - def set_theta_direction(self, direction): - """ - Set the direction in which theta increases. - - clockwise, -1: - Theta increases in the clockwise direction - - counterclockwise, anticlockwise, 1: - Theta increases in the counterclockwise direction - """ - mtx = self._direction.get_matrix() - if direction in ('clockwise', -1): - mtx[0, 0] = -1 - elif direction in ('counterclockwise', 'anticlockwise', 1): - mtx[0, 0] = 1 - else: - _api.check_in_list( - [-1, 1, 'clockwise', 'counterclockwise', 'anticlockwise'], - direction=direction) - self._direction.invalidate() - - def get_theta_direction(self): - """ - Get the direction in which theta increases. - - -1: - Theta increases in the clockwise direction - - 1: - Theta increases in the counterclockwise direction - """ - return self._direction.get_matrix()[0, 0] - - def set_rmax(self, rmax): - """ - Set the outer radial limit. - - Parameters - ---------- - rmax : float - """ - self.viewLim.y1 = rmax - - def get_rmax(self): - """ - Returns - ------- - float - Outer radial limit. - """ - return self.viewLim.ymax - - def set_rmin(self, rmin): - """ - Set the inner radial limit. - - Parameters - ---------- - rmin : float - """ - self.viewLim.y0 = rmin - - def get_rmin(self): - """ - Returns - ------- - float - The inner radial limit. - """ - return self.viewLim.ymin - - def set_rorigin(self, rorigin): - """ - Update the radial origin. - - Parameters - ---------- - rorigin : float - """ - self._originViewLim.locked_y0 = rorigin - - def get_rorigin(self): - """ - Returns - ------- - float - """ - return self._originViewLim.y0 - - def get_rsign(self): - return np.sign(self._originViewLim.y1 - self._originViewLim.y0) - - def set_rlim(self, bottom=None, top=None, *, - emit=True, auto=False, **kwargs): - """ - Set the radial axis view limits. - - This function behaves like `.Axes.set_ylim`, but additionally supports - *rmin* and *rmax* as aliases for *bottom* and *top*. - - See Also - -------- - .Axes.set_ylim - """ - if 'rmin' in kwargs: - if bottom is None: - bottom = kwargs.pop('rmin') - else: - raise ValueError('Cannot supply both positional "bottom"' - 'argument and kwarg "rmin"') - if 'rmax' in kwargs: - if top is None: - top = kwargs.pop('rmax') - else: - raise ValueError('Cannot supply both positional "top"' - 'argument and kwarg "rmax"') - return self.set_ylim(bottom=bottom, top=top, emit=emit, auto=auto, - **kwargs) - - def get_rlabel_position(self): - """ - Returns - ------- - float - The theta position of the radius labels in degrees. - """ - return np.rad2deg(self._r_label_position.get_matrix()[0, 2]) - - def set_rlabel_position(self, value): - """ - Update the theta position of the radius labels. - - Parameters - ---------- - value : number - The angular position of the radius labels in degrees. 
- """ - self._r_label_position.clear().translate(np.deg2rad(value), 0.0) - - def set_yscale(self, *args, **kwargs): - super().set_yscale(*args, **kwargs) - self.yaxis.set_major_locator( - self.RadialLocator(self.yaxis.get_major_locator(), self)) - - def set_rscale(self, *args, **kwargs): - return Axes.set_yscale(self, *args, **kwargs) - - def set_rticks(self, *args, **kwargs): - return Axes.set_yticks(self, *args, **kwargs) - - def set_thetagrids(self, angles, labels=None, fmt=None, **kwargs): - """ - Set the theta gridlines in a polar plot. - - Parameters - ---------- - angles : tuple with floats, degrees - The angles of the theta gridlines. - - labels : tuple with strings or None - The labels to use at each theta gridline. The - `.projections.polar.ThetaFormatter` will be used if None. - - fmt : str or None - Format string used in `matplotlib.ticker.FormatStrFormatter`. - For example '%f'. Note that the angle that is used is in - radians. - - Returns - ------- - lines : list of `.lines.Line2D` - The theta gridlines. - - labels : list of `.text.Text` - The tick labels. - - Other Parameters - ---------------- - **kwargs - *kwargs* are optional `.Text` properties for the labels. - - .. warning:: - - This only sets the properties of the current ticks. - Ticks are not guaranteed to be persistent. Various operations - can create, delete and modify the Tick instances. There is an - imminent risk that these settings can get lost if you work on - the figure further (including also panning/zooming on a - displayed figure). - - Use `.set_tick_params` instead if possible. - - See Also - -------- - .PolarAxes.set_rgrids - .Axis.get_gridlines - .Axis.get_ticklabels - """ - - # Make sure we take into account unitized data - angles = self.convert_yunits(angles) - angles = np.deg2rad(angles) - self.set_xticks(angles) - if labels is not None: - self.set_xticklabels(labels) - elif fmt is not None: - self.xaxis.set_major_formatter(mticker.FormatStrFormatter(fmt)) - for t in self.xaxis.get_ticklabels(): - t._internal_update(kwargs) - return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels() - - def set_rgrids(self, radii, labels=None, angle=None, fmt=None, **kwargs): - """ - Set the radial gridlines on a polar plot. - - Parameters - ---------- - radii : tuple with floats - The radii for the radial gridlines - - labels : tuple with strings or None - The labels to use at each radial gridline. The - `matplotlib.ticker.ScalarFormatter` will be used if None. - - angle : float - The angular position of the radius labels in degrees. - - fmt : str or None - Format string used in `matplotlib.ticker.FormatStrFormatter`. - For example '%f'. - - Returns - ------- - lines : list of `.lines.Line2D` - The radial gridlines. - - labels : list of `.text.Text` - The tick labels. - - Other Parameters - ---------------- - **kwargs - *kwargs* are optional `.Text` properties for the labels. - - .. warning:: - - This only sets the properties of the current ticks. - Ticks are not guaranteed to be persistent. Various operations - can create, delete and modify the Tick instances. There is an - imminent risk that these settings can get lost if you work on - the figure further (including also panning/zooming on a - displayed figure). - - Use `.set_tick_params` instead if possible. 
- - See Also - -------- - .PolarAxes.set_thetagrids - .Axis.get_gridlines - .Axis.get_ticklabels - """ - # Make sure we take into account unitized data - radii = self.convert_xunits(radii) - radii = np.asarray(radii) - - self.set_yticks(radii) - if labels is not None: - self.set_yticklabels(labels) - elif fmt is not None: - self.yaxis.set_major_formatter(mticker.FormatStrFormatter(fmt)) - if angle is None: - angle = self.get_rlabel_position() - self.set_rlabel_position(angle) - for t in self.yaxis.get_ticklabels(): - t._internal_update(kwargs) - return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels() - - def format_coord(self, theta, r): - # docstring inherited - screen_xy = self.transData.transform((theta, r)) - screen_xys = screen_xy + np.stack( - np.meshgrid([-1, 0, 1], [-1, 0, 1])).reshape((2, -1)).T - ts, rs = self.transData.inverted().transform(screen_xys).T - delta_t = abs((ts - theta + np.pi) % (2 * np.pi) - np.pi).max() - delta_t_halfturns = delta_t / np.pi - delta_t_degrees = delta_t_halfturns * 180 - delta_r = abs(rs - r).max() - if theta < 0: - theta += 2 * np.pi - theta_halfturns = theta / np.pi - theta_degrees = theta_halfturns * 180 - - # See ScalarFormatter.format_data_short. For r, use #g-formatting - # (as for linear axes), but for theta, use f-formatting as scientific - # notation doesn't make sense and the trailing dot is ugly. - def format_sig(value, delta, opt, fmt): - # For "f", only count digits after decimal point. - prec = (max(0, -math.floor(math.log10(delta))) if fmt == "f" else - cbook._g_sig_digits(value, delta)) - return f"{value:-{opt}.{prec}{fmt}}" - - return ('\N{GREEK SMALL LETTER THETA}={}\N{GREEK SMALL LETTER PI} ' - '({}\N{DEGREE SIGN}), r={}').format( - format_sig(theta_halfturns, delta_t_halfturns, "", "f"), - format_sig(theta_degrees, delta_t_degrees, "", "f"), - format_sig(r, delta_r, "#", "g"), - ) - - def get_data_ratio(self): - """ - Return the aspect ratio of the data itself. For a polar plot, - this should always be 1.0 - """ - return 1.0 - - # # # Interactive panning - - def can_zoom(self): - """ - Return whether this Axes supports the zoom box button functionality. - - A polar Axes does not support zoom boxes. - """ - return False - - def can_pan(self): - """ - Return whether this Axes supports the pan/zoom button functionality. - - For a polar Axes, this is slightly misleading. Both panning and - zooming are performed by the same button. Panning is performed - in azimuth while zooming is done along the radial. 
- """ - return True - - def start_pan(self, x, y, button): - angle = np.deg2rad(self.get_rlabel_position()) - mode = '' - if button == 1: - epsilon = np.pi / 45.0 - t, r = self.transData.inverted().transform((x, y)) - if angle - epsilon <= t <= angle + epsilon: - mode = 'drag_r_labels' - elif button == 3: - mode = 'zoom' - - self._pan_start = types.SimpleNamespace( - rmax=self.get_rmax(), - trans=self.transData.frozen(), - trans_inverse=self.transData.inverted().frozen(), - r_label_angle=self.get_rlabel_position(), - x=x, - y=y, - mode=mode) - - def end_pan(self): - del self._pan_start - - def drag_pan(self, button, key, x, y): - p = self._pan_start - - if p.mode == 'drag_r_labels': - (startt, startr), (t, r) = p.trans_inverse.transform( - [(p.x, p.y), (x, y)]) - - # Deal with theta - dt = np.rad2deg(startt - t) - self.set_rlabel_position(p.r_label_angle - dt) - - trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0) - trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0) - for t in self.yaxis.majorTicks + self.yaxis.minorTicks: - t.label1.set_va(vert1) - t.label1.set_ha(horiz1) - t.label2.set_va(vert2) - t.label2.set_ha(horiz2) - - elif p.mode == 'zoom': - (startt, startr), (t, r) = p.trans_inverse.transform( - [(p.x, p.y), (x, y)]) - - # Deal with r - scale = r / startr - self.set_rmax(p.rmax / scale) - - -# To keep things all self-contained, we can put aliases to the Polar classes -# defined above. This isn't strictly necessary, but it makes some of the -# code more readable, and provides a backwards compatible Polar API. In -# particular, this is used by the :doc:`/gallery/specialty_plots/radar_chart` -# example to override PolarTransform on a PolarAxes subclass, so make sure that -# that example is unaffected before changing this. -PolarAxes.PolarTransform = PolarTransform -PolarAxes.PolarAffine = PolarAffine -PolarAxes.InvertedPolarTransform = InvertedPolarTransform -PolarAxes.ThetaFormatter = ThetaFormatter -PolarAxes.RadialLocator = RadialLocator -PolarAxes.ThetaLocator = ThetaLocator diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/testing/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/testing/__init__.py deleted file mode 100644 index da21eb42c5b2cf07bcc8fdd7c2a97ee9594e4b86..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/testing/__init__.py +++ /dev/null @@ -1,174 +0,0 @@ -""" -Helper functions for testing. -""" -from pathlib import Path -from tempfile import TemporaryDirectory -import locale -import logging -import os -import subprocess -import sys - -import matplotlib as mpl -from matplotlib import _api - -_log = logging.getLogger(__name__) - - -def set_font_settings_for_testing(): - mpl.rcParams['font.family'] = 'DejaVu Sans' - mpl.rcParams['text.hinting'] = 'none' - mpl.rcParams['text.hinting_factor'] = 8 - - -def set_reproducibility_for_testing(): - mpl.rcParams['svg.hashsalt'] = 'matplotlib' - - -def setup(): - # The baseline images are created in this locale, so we should use - # it during all of the tests. - - try: - locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') - except locale.Error: - try: - locale.setlocale(locale.LC_ALL, 'English_United States.1252') - except locale.Error: - _log.warning( - "Could not set locale to English/United States. 
" - "Some date-related tests may fail.") - - mpl.use('Agg') - - with _api.suppress_matplotlib_deprecation_warning(): - mpl.rcdefaults() # Start with all defaults - - # These settings *must* be hardcoded for running the comparison tests and - # are not necessarily the default values as specified in rcsetup.py. - set_font_settings_for_testing() - set_reproducibility_for_testing() - - -def subprocess_run_for_testing(command, env=None, timeout=None, stdout=None, - stderr=None, check=False, text=True, - capture_output=False): - """ - Create and run a subprocess. - - Thin wrapper around `subprocess.run`, intended for testing. Will - mark fork() failures on Cygwin as expected failures: not a - success, but not indicating a problem with the code either. - - Parameters - ---------- - args : list of str - env : dict[str, str] - timeout : float - stdout, stderr - check : bool - text : bool - Also called ``universal_newlines`` in subprocess. I chose this - name since the main effect is returning bytes (`False`) vs. str - (`True`), though it also tries to normalize newlines across - platforms. - capture_output : bool - Set stdout and stderr to subprocess.PIPE - - Returns - ------- - proc : subprocess.Popen - - See Also - -------- - subprocess.run - - Raises - ------ - pytest.xfail - If platform is Cygwin and subprocess reports a fork() failure. - """ - if capture_output: - stdout = stderr = subprocess.PIPE - try: - proc = subprocess.run( - command, env=env, - timeout=timeout, check=check, - stdout=stdout, stderr=stderr, - text=text - ) - except BlockingIOError: - if sys.platform == "cygwin": - # Might want to make this more specific - import pytest - pytest.xfail("Fork failure") - raise - return proc - - -def subprocess_run_helper(func, *args, timeout, extra_env=None): - """ - Run a function in a sub-process. - - Parameters - ---------- - func : function - The function to be run. It must be in a module that is importable. - *args : str - Any additional command line arguments to be passed in - the first argument to ``subprocess.run``. - extra_env : dict[str, str] - Any additional environment variables to be set for the subprocess. 
- """ - target = func.__name__ - module = func.__module__ - proc = subprocess_run_for_testing( - [ - sys.executable, - "-c", - f"from {module} import {target}; {target}()", - *args - ], - env={**os.environ, "SOURCE_DATE_EPOCH": "0", **(extra_env or {})}, - timeout=timeout, check=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True - ) - return proc - - -def _check_for_pgf(texsystem): - """ - Check if a given TeX system + pgf is available - - Parameters - ---------- - texsystem : str - The executable name to check - """ - with TemporaryDirectory() as tmpdir: - tex_path = Path(tmpdir, "test.tex") - tex_path.write_text(r""" - \documentclass{article} - \usepackage{pgf} - \begin{document} - \typeout{pgfversion=\pgfversion} - \makeatletter - \@@end - """, encoding="utf-8") - try: - subprocess.check_call( - [texsystem, "-halt-on-error", str(tex_path)], cwd=tmpdir, - stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) - except (OSError, subprocess.CalledProcessError): - return False - return True - - -def _has_tex_package(package): - try: - mpl.dviread.find_tex_file(f"{package}.sty") - return True - except FileNotFoundError: - return False diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c deleted file mode 100644 index 5799f122b511420eb16d066c31dc218bc4fae110..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c +++ /dev/null @@ -1,24 +0,0 @@ -#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER) - /* - * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics, - * whether or not the build options for those features are specified. - * Therefore, we must test #definitions of CPU features when option native/host - * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise - * the test will be broken and leads to enable all possible features. - */ - #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__) - #error "HOST/ARCH doesn't support CannonLake AVX512 features" - #endif -#endif - -#include - -int main(int argc, char **argv) -{ - __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]); - /* IFMA */ - a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512()); - /* VMBI */ - a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a); - return _mm_cvtsi128_si32(_mm512_castsi512_si128(a)); -} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/conv_template.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/conv_template.py deleted file mode 100644 index c8933d1d42865f745bb985f7f9068a96985997f7..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/conv_template.py +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/env python3 -""" -takes templated file .xxx.src and produces .xxx file where .xxx is -.i or .c or .h, using the following template rules - -/**begin repeat -- on a line by itself marks the start of a repeated code - segment -/**end repeat**/ -- on a line by itself marks it's end - -After the /**begin repeat and before the */, all the named templates are placed -these should all have the same number of replacements - -Repeat blocks can be nested, with each nested block labeled with its depth, -i.e. -/**begin repeat1 - *.... 
- */ -/**end repeat1**/ - -When using nested loops, you can optionally exclude particular -combinations of the variables using (inside the comment portion of the inner loop): - - :exclude: var1=value1, var2=value2, ... - -This will exclude the pattern where var1 is value1 and var2 is value2 when -the result is being generated. - - -In the main body each replace will use one entry from the list of named replacements - - Note that all #..# forms in a block must have the same number of - comma-separated entries. - -Example: - - An input file containing - - /**begin repeat - * #a = 1,2,3# - * #b = 1,2,3# - */ - - /**begin repeat1 - * #c = ted, jim# - */ - @a@, @b@, @c@ - /**end repeat1**/ - - /**end repeat**/ - - produces - - line 1 "template.c.src" - - /* - ********************************************************************* - ** This file was autogenerated from a template DO NOT EDIT!!** - ** Changes should be made to the original source (.src) file ** - ********************************************************************* - */ - - #line 9 - 1, 1, ted - - #line 9 - 1, 1, jim - - #line 9 - 2, 2, ted - - #line 9 - 2, 2, jim - - #line 9 - 3, 3, ted - - #line 9 - 3, 3, jim - -""" - -__all__ = ['process_str', 'process_file'] - -import os -import sys -import re - -# names for replacement that are already global. -global_names = {} - -# header placed at the front of head processed file -header =\ -""" -/* - ***************************************************************************** - ** This file was autogenerated from a template DO NOT EDIT!!!! ** - ** Changes should be made to the original source (.src) file ** - ***************************************************************************** - */ - -""" -# Parse string for repeat loops -def parse_structure(astr, level): - """ - The returned line number is from the beginning of the string, starting - at zero. Returns an empty list if no loops found. - - """ - if level == 0 : - loopbeg = "/**begin repeat" - loopend = "/**end repeat**/" - else : - loopbeg = "/**begin repeat%d" % level - loopend = "/**end repeat%d**/" % level - - ind = 0 - line = 0 - spanlist = [] - while True: - start = astr.find(loopbeg, ind) - if start == -1: - break - start2 = astr.find("*/", start) - start2 = astr.find("\n", start2) - fini1 = astr.find(loopend, start2) - fini2 = astr.find("\n", fini1) - line += astr.count("\n", ind, start2+1) - spanlist.append((start, start2+1, fini1, fini2+1, line)) - line += astr.count("\n", start2+1, fini2) - ind = fini2 - spanlist.sort() - return spanlist - - -def paren_repl(obj): - torep = obj.group(1) - numrep = obj.group(2) - return ','.join([torep]*int(numrep)) - -parenrep = re.compile(r"\(([^)]*)\)\*(\d+)") -plainrep = re.compile(r"([^*]+)\*(\d+)") -def parse_values(astr): - # replaces all occurrences of '(a,b,c)*4' in astr - # with 'a,b,c,a,b,c,a,b,c,a,b,c'. Empty braces generate - # empty values, i.e., ()*4 yields ',,,'. The result is - # split at ',' and a list of values returned. 
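Editorial aside, not part of the deleted `conv_template.py`: a quick trace of the expansion that `parse_values` performs with the two regexes defined just above, assuming the module is importable under its path in this tree (`numpy.distutils.conv_template`):

```python
>>> from numpy.distutils.conv_template import parse_values
>>> # "(a,b)*n" groups expand first, then plain "x*n" terms:
>>> # "(1,2)*2, 3*2" -> "1,2,1,2, 3*2" -> "1,2,1,2,3,3"
>>> parse_values("(1,2)*2, 3*2")
['1', '2', '1', '2', '3', '3']
```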
- astr = parenrep.sub(paren_repl, astr) - # replaces occurrences of xxx*3 with xxx, xxx, xxx - astr = ','.join([plainrep.sub(paren_repl, x.strip()) - for x in astr.split(',')]) - return astr.split(',') - - -stripast = re.compile(r"\n\s*\*?") -named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#") -exclude_vars_re = re.compile(r"(\w*)=(\w*)") -exclude_re = re.compile(":exclude:") -def parse_loop_header(loophead) : - """Find all named replacements in the header - - Returns a list of dictionaries, one for each loop iteration, - where each key is a name to be substituted and the corresponding - value is the replacement string. - - Also return a list of exclusions. The exclusions are dictionaries - of key value pairs. There can be more than one exclusion. - [{'var1':'value1', 'var2', 'value2'[,...]}, ...] - - """ - # Strip out '\n' and leading '*', if any, in continuation lines. - # This should not effect code previous to this change as - # continuation lines were not allowed. - loophead = stripast.sub("", loophead) - # parse out the names and lists of values - names = [] - reps = named_re.findall(loophead) - nsub = None - for rep in reps: - name = rep[0] - vals = parse_values(rep[1]) - size = len(vals) - if nsub is None : - nsub = size - elif nsub != size : - msg = "Mismatch in number of values, %d != %d\n%s = %s" - raise ValueError(msg % (nsub, size, name, vals)) - names.append((name, vals)) - - - # Find any exclude variables - excludes = [] - - for obj in exclude_re.finditer(loophead): - span = obj.span() - # find next newline - endline = loophead.find('\n', span[1]) - substr = loophead[span[1]:endline] - ex_names = exclude_vars_re.findall(substr) - excludes.append(dict(ex_names)) - - # generate list of dictionaries, one for each template iteration - dlist = [] - if nsub is None : - raise ValueError("No substitution variables found") - for i in range(nsub): - tmp = {name: vals[i] for name, vals in names} - dlist.append(tmp) - return dlist - -replace_re = re.compile(r"@(\w+)@") -def parse_string(astr, env, level, line) : - lineno = "#line %d\n" % line - - # local function for string replacement, uses env - def replace(match): - name = match.group(1) - try : - val = env[name] - except KeyError: - msg = 'line %d: no definition of key "%s"'%(line, name) - raise ValueError(msg) from None - return val - - code = [lineno] - struct = parse_structure(astr, level) - if struct : - # recurse over inner loops - oldend = 0 - newlevel = level + 1 - for sub in struct: - pref = astr[oldend:sub[0]] - head = astr[sub[0]:sub[1]] - text = astr[sub[1]:sub[2]] - oldend = sub[3] - newline = line + sub[4] - code.append(replace_re.sub(replace, pref)) - try : - envlist = parse_loop_header(head) - except ValueError as e: - msg = "line %d: %s" % (newline, e) - raise ValueError(msg) - for newenv in envlist : - newenv.update(env) - newcode = parse_string(text, newenv, newlevel, newline) - code.extend(newcode) - suff = astr[oldend:] - code.append(replace_re.sub(replace, suff)) - else : - # replace keys - code.append(replace_re.sub(replace, astr)) - code.append('\n') - return ''.join(code) - -def process_str(astr): - code = [header] - code.extend(parse_string(astr, global_names, 0, 1)) - return ''.join(code) - - -include_src_re = re.compile(r"(\n|\A)#include\s*['\"]" - r"(?P[\w\d./\\]+[.]src)['\"]", re.I) - -def resolve_includes(source): - d = os.path.dirname(source) - with open(source) as fid: - lines = [] - for line in fid: - m = include_src_re.match(line) - if m: - fn = m.group('name') - if not os.path.isabs(fn): - fn = 
os.path.join(d, fn) - if os.path.isfile(fn): - lines.extend(resolve_includes(fn)) - else: - lines.append(line) - else: - lines.append(line) - return lines - -def process_file(source): - lines = resolve_includes(source) - sourcefile = os.path.normcase(source).replace("\\", "\\\\") - try: - code = process_str(''.join(lines)) - except ValueError as e: - raise ValueError('In "%s" loop at %s' % (sourcefile, e)) from None - return '#line 1 "%s"\n%s' % (sourcefile, code) - - -def unique_key(adict): - # this obtains a unique key given a dictionary - # currently it works by appending together n of the letters of the - # current keys and increasing n until a unique key is found - # -- not particularly quick - allkeys = list(adict.keys()) - done = False - n = 1 - while not done: - newkey = "".join([x[:n] for x in allkeys]) - if newkey in allkeys: - n += 1 - else: - done = True - return newkey - - -def main(): - try: - file = sys.argv[1] - except IndexError: - fid = sys.stdin - outfile = sys.stdout - else: - fid = open(file, 'r') - (base, ext) = os.path.splitext(file) - newname = base - outfile = open(newname, 'w') - - allstr = fid.read() - try: - writestr = process_str(allstr) - except ValueError as e: - raise ValueError("In %s loop at %s" % (file, e)) from None - - outfile.write(writestr) - -if __name__ == "__main__": - main() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/wandb_logger.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/wandb_logger.py deleted file mode 100644 index d8e060c41be3ff60fa4eff70fb0601161203e137..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/wandb_logger.py +++ /dev/null @@ -1,314 +0,0 @@ -try: - import wandb - - WANDB_AVAILABLE = True -except: - WANDB_AVAILABLE = False - - -if WANDB_AVAILABLE: - import datetime - import io - import json - import re - from pathlib import Path - - from openai import File, FineTune, FineTuningJob - from openai.datalib.numpy_helper import numpy as np - from openai.datalib.pandas_helper import assert_has_pandas, pandas as pd - - -class WandbLogger: - """ - Log fine-tunes to [Weights & Biases](https://wandb.me/openai-docs) - """ - - if not WANDB_AVAILABLE: - print("Logging requires wandb to be installed. Run `pip install wandb`.") - else: - _wandb_api = None - _logged_in = False - - @classmethod - def sync( - cls, - id=None, - n_fine_tunes=None, - project="OpenAI-Fine-Tune", - entity=None, - force=False, - legacy=False, - **kwargs_wandb_init, - ): - """ - Sync fine-tunes to Weights & Biases. - :param id: The id of the fine-tune (optional) - :param n_fine_tunes: Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced. - :param project: Name of the project where you're sending runs. By default, it is "GPT-3". - :param entity: Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. - :param force: Forces logging and overwrite existing wandb run of the same fine-tune. 
- """ - - assert_has_pandas() - - if not WANDB_AVAILABLE: - return - - if id: - print("Retrieving fine-tune job...") - if legacy: - fine_tune = FineTune.retrieve(id=id) - else: - fine_tune = FineTuningJob.retrieve(id=id) - fine_tune.pop("events", None) - fine_tunes = [fine_tune] - else: - # get list of fine_tune to log - if legacy: - fine_tunes = FineTune.list() - else: - fine_tunes = list(FineTuningJob.auto_paging_iter()) - if not fine_tunes or fine_tunes.get("data") is None: - print("No fine-tune has been retrieved") - return - fine_tunes = fine_tunes["data"][ - -n_fine_tunes if n_fine_tunes is not None else None : - ] - - # log starting from oldest fine_tune - show_individual_warnings = ( - False if id is None and n_fine_tunes is None else True - ) - fine_tune_logged = [ - cls._log_fine_tune( - fine_tune, - project, - entity, - force, - legacy, - show_individual_warnings, - **kwargs_wandb_init, - ) - for fine_tune in fine_tunes - ] - - if not show_individual_warnings and not any(fine_tune_logged): - print("No new successful fine-tunes were found") - - return "🎉 wandb sync completed successfully" - - @classmethod - def _log_fine_tune( - cls, - fine_tune, - project, - entity, - force, - legacy, - show_individual_warnings, - **kwargs_wandb_init, - ): - fine_tune_id = fine_tune.get("id") - status = fine_tune.get("status") - - # check run completed successfully - if status != "succeeded": - if show_individual_warnings: - print( - f'Fine-tune {fine_tune_id} has the status "{status}" and will not be logged' - ) - return - - # check results are present - try: - if legacy: - results_id = fine_tune["result_files"][0]["id"] - else: - results_id = fine_tune["result_files"][0] - results = File.download(id=results_id).decode("utf-8") - except: - if show_individual_warnings: - print(f"Fine-tune {fine_tune_id} has no results and will not be logged") - return - - # check run has not been logged already - run_path = f"{project}/{fine_tune_id}" - if entity is not None: - run_path = f"{entity}/{run_path}" - wandb_run = cls._get_wandb_run(run_path) - if wandb_run: - wandb_status = wandb_run.summary.get("status") - if show_individual_warnings: - if wandb_status == "succeeded": - print( - f"Fine-tune {fine_tune_id} has already been logged successfully at {wandb_run.url}" - ) - if not force: - print( - 'Use "--force" in the CLI or "force=True" in python if you want to overwrite previous run' - ) - else: - print( - f"A run for fine-tune {fine_tune_id} was previously created but didn't end successfully" - ) - if wandb_status != "succeeded" or force: - print( - f"A new wandb run will be created for fine-tune {fine_tune_id} and previous run will be overwritten" - ) - if wandb_status == "succeeded" and not force: - return - - # start a wandb run - wandb.init( - job_type="fine-tune", - config=cls._get_config(fine_tune), - project=project, - entity=entity, - name=fine_tune_id, - id=fine_tune_id, - **kwargs_wandb_init, - ) - - # log results - df_results = pd.read_csv(io.StringIO(results)) - for _, row in df_results.iterrows(): - metrics = {k: v for k, v in row.items() if not np.isnan(v)} - step = metrics.pop("step") - if step is not None: - step = int(step) - wandb.log(metrics, step=step) - fine_tuned_model = fine_tune.get("fine_tuned_model") - if fine_tuned_model is not None: - wandb.summary["fine_tuned_model"] = fine_tuned_model - - # training/validation files and fine-tune details - cls._log_artifacts(fine_tune, project, entity) - - # mark run as complete - wandb.summary["status"] = "succeeded" - - wandb.finish() - 
return True - - @classmethod - def _ensure_logged_in(cls): - if not cls._logged_in: - if wandb.login(): - cls._logged_in = True - else: - raise Exception("You need to log in to wandb") - - @classmethod - def _get_wandb_run(cls, run_path): - cls._ensure_logged_in() - try: - if cls._wandb_api is None: - cls._wandb_api = wandb.Api() - return cls._wandb_api.run(run_path) - except Exception: - return None - - @classmethod - def _get_wandb_artifact(cls, artifact_path): - cls._ensure_logged_in() - try: - if cls._wandb_api is None: - cls._wandb_api = wandb.Api() - return cls._wandb_api.artifact(artifact_path) - except Exception: - return None - - @classmethod - def _get_config(cls, fine_tune): - config = dict(fine_tune) - for key in ("training_files", "validation_files", "result_files"): - if config.get(key) and len(config[key]): - config[key] = config[key][0] - if config.get("created_at"): - config["created_at"] = datetime.datetime.fromtimestamp(config["created_at"]) - return config - - @classmethod - def _log_artifacts(cls, fine_tune, project, entity): - # training/validation files - training_file = ( - fine_tune["training_files"][0] - if fine_tune.get("training_files") and len(fine_tune["training_files"]) - else None - ) - validation_file = ( - fine_tune["validation_files"][0] - if fine_tune.get("validation_files") and len(fine_tune["validation_files"]) - else None - ) - for file, prefix, artifact_type in ( - (training_file, "train", "training_files"), - (validation_file, "valid", "validation_files"), - ): - if file is not None: - cls._log_artifact_inputs(file, prefix, artifact_type, project, entity) - - # fine-tune details - fine_tune_id = fine_tune.get("id") - artifact = wandb.Artifact( - "fine_tune_details", - type="fine_tune_details", - metadata=fine_tune, - ) - with artifact.new_file( - "fine_tune_details.json", mode="w", encoding="utf-8" - ) as f: - json.dump(fine_tune, f, indent=2) - wandb.run.log_artifact( - artifact, - aliases=["latest", fine_tune_id], - ) - - @classmethod - def _log_artifact_inputs(cls, file, prefix, artifact_type, project, entity): - file_id = file["id"] - filename = Path(file["filename"]).name - stem = Path(file["filename"]).stem - - # get input artifact - artifact_name = f"{prefix}-{filename}" - # sanitize name to valid wandb artifact name - artifact_name = re.sub(r"[^a-zA-Z0-9_\-.]", "_", artifact_name) - artifact_alias = file_id - artifact_path = f"{project}/{artifact_name}:{artifact_alias}" - if entity is not None: - artifact_path = f"{entity}/{artifact_path}" - artifact = cls._get_wandb_artifact(artifact_path) - - # create artifact if file not already logged previously - if artifact is None: - # get file content - try: - file_content = File.download(id=file_id).decode("utf-8") - except: - print( - f"File {file_id} could not be retrieved. 
Make sure you are allowed to download training/validation files" - ) - return - artifact = wandb.Artifact(artifact_name, type=artifact_type, metadata=file) - with artifact.new_file(filename, mode="w", encoding="utf-8") as f: - f.write(file_content) - - # create a Table - try: - table, n_items = cls._make_table(file_content) - artifact.add(table, stem) - wandb.config.update({f"n_{prefix}": n_items}) - artifact.metadata["items"] = n_items - except: - print(f"File {file_id} could not be read as a valid JSON file") - else: - # log number of items - wandb.config.update({f"n_{prefix}": artifact.metadata.get("items")}) - - wandb.run.use_artifact(artifact, aliases=["latest", artifact_alias]) - - @classmethod - def _make_table(cls, file_content): - df = pd.read_json(io.StringIO(file_content), orient="records", lines=True) - return wandb.Table(dataframe=df), len(df) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/test_pickle.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/test_pickle.py deleted file mode 100644 index cb981ab10064fbdc848f8e034b9fa372dcdf4b68..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/period/test_pickle.py +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np -import pytest - -from pandas import ( - NaT, - PeriodIndex, - period_range, -) -import pandas._testing as tm - -from pandas.tseries import offsets - - -class TestPickle: - @pytest.mark.parametrize("freq", ["D", "M", "A"]) - def test_pickle_round_trip(self, freq): - idx = PeriodIndex(["2016-05-16", "NaT", NaT, np.nan], freq=freq) - result = tm.round_trip_pickle(idx) - tm.assert_index_equal(result, idx) - - def test_pickle_freq(self): - # GH#2891 - prng = period_range("1/1/2011", "1/1/2012", freq="M") - new_prng = tm.round_trip_pickle(prng) - assert new_prng.freq == offsets.MonthEnd() - assert new_prng.freqstr == "M" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/util/test_deprecate_kwarg.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/util/test_deprecate_kwarg.py deleted file mode 100644 index b165e9fba0e4f2a9986ddc40c73aa1058d14a9ec..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/util/test_deprecate_kwarg.py +++ /dev/null @@ -1,90 +0,0 @@ -import pytest - -from pandas.util._decorators import deprecate_kwarg - -import pandas._testing as tm - - -@deprecate_kwarg("old", "new") -def _f1(new=False): - return new - - -_f2_mappings = {"yes": True, "no": False} - - -@deprecate_kwarg("old", "new", _f2_mappings) -def _f2(new=False): - return new - - -def _f3_mapping(x): - return x + 1 - - -@deprecate_kwarg("old", "new", _f3_mapping) -def _f3(new=0): - return new - - -@pytest.mark.parametrize("key,klass", [("old", FutureWarning), ("new", None)]) -def test_deprecate_kwarg(key, klass): - x = 78 - - with tm.assert_produces_warning(klass): - assert _f1(**{key: x}) == x - - -@pytest.mark.parametrize("key", list(_f2_mappings.keys())) -def test_dict_deprecate_kwarg(key): - with tm.assert_produces_warning(FutureWarning): - assert _f2(old=key) == _f2_mappings[key] - - -@pytest.mark.parametrize("key", ["bogus", 12345, -1.23]) -def test_missing_deprecate_kwarg(key): - with tm.assert_produces_warning(FutureWarning): - assert _f2(old=key) == key - - -@pytest.mark.parametrize("x", [1, -1.4, 0]) -def 
test_callable_deprecate_kwarg(x): - with tm.assert_produces_warning(FutureWarning): - assert _f3(old=x) == _f3_mapping(x) - - -def test_callable_deprecate_kwarg_fail(): - msg = "((can only|cannot) concatenate)|(must be str)|(Can't convert)" - - with pytest.raises(TypeError, match=msg): - _f3(old="hello") - - -def test_bad_deprecate_kwarg(): - msg = "mapping from old to new argument values must be dict or callable!" - - with pytest.raises(TypeError, match=msg): - - @deprecate_kwarg("old", "new", 0) - def f4(new=None): - return new - - -@deprecate_kwarg("old", None) -def _f4(old=True, unchanged=True): - return old, unchanged - - -@pytest.mark.parametrize("key", ["old", "unchanged"]) -def test_deprecate_keyword(key): - x = 9 - - if key == "old": - klass = FutureWarning - expected = (x, True) - else: - klass = None - expected = (True, x) - - with tm.assert_produces_warning(klass): - assert _f4(**{key: x}) == expected diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py deleted file mode 100644 index c56af390fe250c1048036375fff340db5d2807a8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py +++ /dev/null @@ -1,215 +0,0 @@ -from __future__ import absolute_import, division, unicode_literals -from pip._vendor.six import text_type - -from collections import OrderedDict - -from lxml import etree -from ..treebuilders.etree import tag_regexp - -from . import base - -from .. import _ihatexml - - -def ensure_str(s): - if s is None: - return None - elif isinstance(s, text_type): - return s - else: - return s.decode("ascii", "strict") - - -class Root(object): - def __init__(self, et): - self.elementtree = et - self.children = [] - - try: - if et.docinfo.internalDTD: - self.children.append(Doctype(self, - ensure_str(et.docinfo.root_name), - ensure_str(et.docinfo.public_id), - ensure_str(et.docinfo.system_url))) - except AttributeError: - pass - - try: - node = et.getroot() - except AttributeError: - node = et - - while node.getprevious() is not None: - node = node.getprevious() - while node is not None: - self.children.append(node) - node = node.getnext() - - self.text = None - self.tail = None - - def __getitem__(self, key): - return self.children[key] - - def getnext(self): - return None - - def __len__(self): - return 1 - - -class Doctype(object): - def __init__(self, root_node, name, public_id, system_id): - self.root_node = root_node - self.name = name - self.public_id = public_id - self.system_id = system_id - - self.text = None - self.tail = None - - def getnext(self): - return self.root_node.children[1] - - -class FragmentRoot(Root): - def __init__(self, children): - self.children = [FragmentWrapper(self, child) for child in children] - self.text = self.tail = None - - def getnext(self): - return None - - -class FragmentWrapper(object): - def __init__(self, fragment_root, obj): - self.root_node = fragment_root - self.obj = obj - if hasattr(self.obj, 'text'): - self.text = ensure_str(self.obj.text) - else: - self.text = None - if hasattr(self.obj, 'tail'): - self.tail = ensure_str(self.obj.tail) - else: - self.tail = None - - def __getattr__(self, name): - return getattr(self.obj, name) - - def getnext(self): - siblings = self.root_node.children - idx = siblings.index(self) - if idx < len(siblings) - 1: - return 
siblings[idx + 1] - else: - return None - - def __getitem__(self, key): - return self.obj[key] - - def __bool__(self): - return bool(self.obj) - - def getparent(self): - return None - - def __str__(self): - return str(self.obj) - - def __unicode__(self): - return str(self.obj) - - def __len__(self): - return len(self.obj) - - -class TreeWalker(base.NonRecursiveTreeWalker): - def __init__(self, tree): - # pylint:disable=redefined-variable-type - if isinstance(tree, list): - self.fragmentChildren = set(tree) - tree = FragmentRoot(tree) - else: - self.fragmentChildren = set() - tree = Root(tree) - base.NonRecursiveTreeWalker.__init__(self, tree) - self.filter = _ihatexml.InfosetFilter() - - def getNodeDetails(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - return base.TEXT, ensure_str(getattr(node, key)) - - elif isinstance(node, Root): - return (base.DOCUMENT,) - - elif isinstance(node, Doctype): - return base.DOCTYPE, node.name, node.public_id, node.system_id - - elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"): - return base.TEXT, ensure_str(node.obj) - - elif node.tag == etree.Comment: - return base.COMMENT, ensure_str(node.text) - - elif node.tag == etree.Entity: - return base.ENTITY, ensure_str(node.text)[1:-1] # strip &; - - else: - # This is assumed to be an ordinary element - match = tag_regexp.match(ensure_str(node.tag)) - if match: - namespace, tag = match.groups() - else: - namespace = None - tag = ensure_str(node.tag) - attrs = OrderedDict() - for name, value in list(node.attrib.items()): - name = ensure_str(name) - value = ensure_str(value) - match = tag_regexp.match(name) - if match: - attrs[(match.group(1), match.group(2))] = value - else: - attrs[(None, name)] = value - return (base.ELEMENT, namespace, self.filter.fromXmlName(tag), - attrs, len(node) > 0 or node.text) - - def getFirstChild(self, node): - assert not isinstance(node, tuple), "Text nodes have no children" - - assert len(node) or node.text, "Node has no children" - if node.text: - return (node, "text") - else: - return node[0] - - def getNextSibling(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - if key == "text": - # XXX: we cannot use a "bool(node) and node[0] or None" construct here - # because node[0] might evaluate to False if it has no child element - if len(node): - return node[0] - else: - return None - else: # tail - return node.getnext() - - return (node, "tail") if node.tail else node.getnext() - - def getParentNode(self, node): - if isinstance(node, tuple): # Text node - node, key = node - assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key - if key == "text": - return node - # else: fallback to "normal" processing - elif node in self.fragmentChildren: - return None - - return node.getparent() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/ambient.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/ambient.py deleted file mode 100644 index deba0f3b0d2f5edba16ccfa337757697a7f72f32..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/ambient.py +++ /dev/null @@ -1,76 +0,0 @@ -""" - pygments.lexers.ambient - ~~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for AmbientTalk language. 
- - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re - -from pygments.lexer import RegexLexer, include, words, bygroups -from pygments.token import Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Whitespace - -__all__ = ['AmbientTalkLexer'] - - -class AmbientTalkLexer(RegexLexer): - """ - Lexer for AmbientTalk source code. - - .. versionadded:: 2.0 - """ - name = 'AmbientTalk' - url = 'https://code.google.com/p/ambienttalk' - filenames = ['*.at'] - aliases = ['ambienttalk', 'ambienttalk/2', 'at'] - mimetypes = ['text/x-ambienttalk'] - - flags = re.MULTILINE | re.DOTALL - - builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:', - 'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:', - 'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:', - 'mirroredBy:', 'is:')) - tokens = { - 'root': [ - (r'\s+', Whitespace), - (r'//.*?\n', Comment.Single), - (r'/\*.*?\*/', Comment.Multiline), - (r'(def|deftype|import|alias|exclude)\b', Keyword), - (builtin, Name.Builtin), - (r'(true|false|nil)\b', Keyword.Constant), - (r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'), - (r'"(\\\\|\\[^\\]|[^"\\])*"', String), - (r'\|', Punctuation, 'arglist'), - (r'<:|[*^!%&<>+=,./?-]|:=', Operator), - (r"`[a-zA-Z_]\w*", String.Symbol), - (r"[a-zA-Z_]\w*:", Name.Function), - (r"[{}()\[\];`]", Punctuation), - (r'(self|super)\b', Name.Variable.Instance), - (r"[a-zA-Z_]\w*", Name.Variable), - (r"@[a-zA-Z_]\w*", Name.Class), - (r"@\[", Name.Class, 'annotations'), - include('numbers'), - ], - 'numbers': [ - (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), - (r'\d+', Number.Integer) - ], - 'namespace': [ - (r'[a-zA-Z_]\w*\.', Name.Namespace), - (r'[a-zA-Z_]\w*:', Name.Function, '#pop'), - (r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop') - ], - 'annotations': [ - (r"(.*?)\]", Name.Class, '#pop') - ], - 'arglist': [ - (r'\|', Punctuation, '#pop'), - (r'(\s*)(,)(\s*)', bygroups(Whitespace, Punctuation, Whitespace)), - (r'[a-zA-Z_]\w*', Name.Variable), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/carbon.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/carbon.py deleted file mode 100644 index 758e8af361966fe551c334fbc87d3b501250dc25..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/carbon.py +++ /dev/null @@ -1,96 +0,0 @@ -""" - pygments.lexers.carbon - ~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for the Carbon programming language. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" -import re - -from pygments.lexer import RegexLexer, bygroups, words -from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ - Number, Punctuation, Whitespace - -__all__ = ['CarbonLexer'] - - -class CarbonLexer(RegexLexer): - """ - For Carbon source. - - .. 
versionadded:: 2.15 - """ - name = 'Carbon' - url = 'https://github.com/carbon-language/carbon-lang' - filenames = ['*.carbon'] - aliases = ['carbon'] - mimetypes = ['text/x-carbon'] - - flags = re.MULTILINE | re.DOTALL - - tokens = { - 'root': [ - (r'\n', Whitespace), - (r'\s+', Whitespace), - (r'\\\n', Text), - # comments - (r'//(.*?)\n', Comment.Single), - (r'/(\\\n)?[*].*?[*](\\\n)?/', Comment.Multiline), - # Declaration - (r'(package|import|api|namespace|library)\b', Keyword.Namespace), - (r'(abstract|alias|fn|class|interface|let|var|virtual|external|' - r'base|addr|extends|choice|constraint|impl)\b', Keyword.Declaration), - # Keywords - (words(('as', 'or', 'not', 'and', 'break', 'continue', 'case', - 'default', 'if', 'else', 'destructor', 'for', 'forall', - 'while', 'where', 'then', 'in', 'is', 'return', 'returned', - 'friend', 'partial', 'private', 'protected', 'observe', 'Self', - 'override', 'final', 'match', 'type', 'like'), suffix=r'\b'), Keyword), - (r'(self)\b', Keyword.Pseudo), - (r'(true|false)\b', Keyword.Constant), - (r'(auto|bool|string|i8|i16|i32|i64|u8|u16|u32|u64|' - r'f8|f16|f32|f64)\b', Keyword.Type), - # numeric literals - (r'[0-9]*[.][0-9]+', Number.Double), - (r'0b[01]+', Number.Bin), - (r'0o[0-7]+', Number.Oct), - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+', Number.Integer), - # string literal - (r'"(\\.|[^"\\])*"', String), - # char literal - (r'\'(\\.|[^\'\\])\'', String.Char), - # tokens - (r'<<=|>>=|<<|>>|<=|>=|\+=|-=|\*=|/=|\%=|\|=|&=|\^=|&&|\|\||&|\||' - r'\+\+|--|\%|\^|\~|==|!=|::|[.]{3}|->|=>|[+\-*/&]', Operator), - (r'[|<>=!()\[\]{}.,;:\?]', Punctuation), - # identifiers - (r'[^\W\d]\w*', Name.Other), - ] - } - - def analyse_text(text): - result = 0 - if 'forall' in text: - result += 0.1 - if 'type' in text: - result += 0.1 - if 'Self' in text: - result += 0.1 - if 'observe' in text: - result += 0.1 - if 'package' in text: - result += 0.1 - if 'library' in text: - result += 0.1 - if 'choice' in text: - result += 0.1 - if 'addr' in text: - result += 0.1 - if 'constraint' in text: - result += 0.1 - if 'impl' in text: - result += 0.1 - return result diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/starlette/testclient.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/starlette/testclient.py deleted file mode 100644 index a66b901d64782f71d0434c717682cb62aa1d5cf1..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/starlette/testclient.py +++ /dev/null @@ -1,797 +0,0 @@ -import contextlib -import inspect -import io -import json -import math -import queue -import sys -import typing -import warnings -from concurrent.futures import Future -from types import GeneratorType -from urllib.parse import unquote, urljoin - -import anyio -import anyio.from_thread -import httpx -from anyio.streams.stapled import StapledObjectStream - -from starlette._utils import is_async_callable -from starlette.types import ASGIApp, Message, Receive, Scope, Send -from starlette.websockets import WebSocketDisconnect - -if sys.version_info >= (3, 8): # pragma: no cover - from typing import TypedDict -else: # pragma: no cover - from typing_extensions import TypedDict - -_PortalFactoryType = typing.Callable[ - [], typing.ContextManager[anyio.abc.BlockingPortal] -] - -ASGIInstance = typing.Callable[[Receive, Send], typing.Awaitable[None]] -ASGI2App = typing.Callable[[Scope], ASGIInstance] -ASGI3App = typing.Callable[[Scope, Receive, Send], typing.Awaitable[None]] - - -_RequestData = 
typing.Mapping[str, typing.Union[str, typing.Iterable[str]]] - - -def _is_asgi3(app: typing.Union[ASGI2App, ASGI3App]) -> bool: - if inspect.isclass(app): - return hasattr(app, "__await__") - return is_async_callable(app) - - -class _WrapASGI2: - """ - Provide an ASGI3 interface onto an ASGI2 app. - """ - - def __init__(self, app: ASGI2App) -> None: - self.app = app - - async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: - instance = self.app(scope) - await instance(receive, send) - - -class _AsyncBackend(TypedDict): - backend: str - backend_options: typing.Dict[str, typing.Any] - - -class _Upgrade(Exception): - def __init__(self, session: "WebSocketTestSession") -> None: - self.session = session - - -class WebSocketTestSession: - def __init__( - self, - app: ASGI3App, - scope: Scope, - portal_factory: _PortalFactoryType, - ) -> None: - self.app = app - self.scope = scope - self.accepted_subprotocol = None - self.portal_factory = portal_factory - self._receive_queue: "queue.Queue[typing.Any]" = queue.Queue() - self._send_queue: "queue.Queue[typing.Any]" = queue.Queue() - self.extra_headers = None - - def __enter__(self) -> "WebSocketTestSession": - self.exit_stack = contextlib.ExitStack() - self.portal = self.exit_stack.enter_context(self.portal_factory()) - - try: - _: "Future[None]" = self.portal.start_task_soon(self._run) - self.send({"type": "websocket.connect"}) - message = self.receive() - self._raise_on_close(message) - except Exception: - self.exit_stack.close() - raise - self.accepted_subprotocol = message.get("subprotocol", None) - self.extra_headers = message.get("headers", None) - return self - - def __exit__(self, *args: typing.Any) -> None: - try: - self.close(1000) - finally: - self.exit_stack.close() - while not self._send_queue.empty(): - message = self._send_queue.get() - if isinstance(message, BaseException): - raise message - - async def _run(self) -> None: - """ - The sub-thread in which the websocket session runs. 
- """ - scope = self.scope - receive = self._asgi_receive - send = self._asgi_send - try: - await self.app(scope, receive, send) - except BaseException as exc: - self._send_queue.put(exc) - raise - - async def _asgi_receive(self) -> Message: - while self._receive_queue.empty(): - await anyio.sleep(0) - return self._receive_queue.get() - - async def _asgi_send(self, message: Message) -> None: - self._send_queue.put(message) - - def _raise_on_close(self, message: Message) -> None: - if message["type"] == "websocket.close": - raise WebSocketDisconnect( - message.get("code", 1000), message.get("reason", "") - ) - - def send(self, message: Message) -> None: - self._receive_queue.put(message) - - def send_text(self, data: str) -> None: - self.send({"type": "websocket.receive", "text": data}) - - def send_bytes(self, data: bytes) -> None: - self.send({"type": "websocket.receive", "bytes": data}) - - def send_json(self, data: typing.Any, mode: str = "text") -> None: - assert mode in ["text", "binary"] - text = json.dumps(data, separators=(",", ":")) - if mode == "text": - self.send({"type": "websocket.receive", "text": text}) - else: - self.send({"type": "websocket.receive", "bytes": text.encode("utf-8")}) - - def close(self, code: int = 1000) -> None: - self.send({"type": "websocket.disconnect", "code": code}) - - def receive(self) -> Message: - message = self._send_queue.get() - if isinstance(message, BaseException): - raise message - return message - - def receive_text(self) -> str: - message = self.receive() - self._raise_on_close(message) - return message["text"] - - def receive_bytes(self) -> bytes: - message = self.receive() - self._raise_on_close(message) - return message["bytes"] - - def receive_json(self, mode: str = "text") -> typing.Any: - assert mode in ["text", "binary"] - message = self.receive() - self._raise_on_close(message) - if mode == "text": - text = message["text"] - else: - text = message["bytes"].decode("utf-8") - return json.loads(text) - - -class _TestClientTransport(httpx.BaseTransport): - def __init__( - self, - app: ASGI3App, - portal_factory: _PortalFactoryType, - raise_server_exceptions: bool = True, - root_path: str = "", - *, - app_state: typing.Dict[str, typing.Any], - ) -> None: - self.app = app - self.raise_server_exceptions = raise_server_exceptions - self.root_path = root_path - self.portal_factory = portal_factory - self.app_state = app_state - - def handle_request(self, request: httpx.Request) -> httpx.Response: - scheme = request.url.scheme - netloc = request.url.netloc.decode(encoding="ascii") - path = request.url.path - raw_path = request.url.raw_path - query = request.url.query.decode(encoding="ascii") - - default_port = {"http": 80, "ws": 80, "https": 443, "wss": 443}[scheme] - - if ":" in netloc: - host, port_string = netloc.split(":", 1) - port = int(port_string) - else: - host = netloc - port = default_port - - # Include the 'host' header. - if "host" in request.headers: - headers: typing.List[typing.Tuple[bytes, bytes]] = [] - elif port == default_port: # pragma: no cover - headers = [(b"host", host.encode())] - else: # pragma: no cover - headers = [(b"host", (f"{host}:{port}").encode())] - - # Include other request headers. 
- headers += [ - (key.lower().encode(), value.encode()) - for key, value in request.headers.items() - ] - - scope: typing.Dict[str, typing.Any] - - if scheme in {"ws", "wss"}: - subprotocol = request.headers.get("sec-websocket-protocol", None) - if subprotocol is None: - subprotocols: typing.Sequence[str] = [] - else: - subprotocols = [value.strip() for value in subprotocol.split(",")] - scope = { - "type": "websocket", - "path": unquote(path), - "raw_path": raw_path, - "root_path": self.root_path, - "scheme": scheme, - "query_string": query.encode(), - "headers": headers, - "client": ["testclient", 50000], - "server": [host, port], - "subprotocols": subprotocols, - "state": self.app_state.copy(), - } - session = WebSocketTestSession(self.app, scope, self.portal_factory) - raise _Upgrade(session) - - scope = { - "type": "http", - "http_version": "1.1", - "method": request.method, - "path": unquote(path), - "raw_path": raw_path, - "root_path": self.root_path, - "scheme": scheme, - "query_string": query.encode(), - "headers": headers, - "client": ["testclient", 50000], - "server": [host, port], - "extensions": {"http.response.debug": {}}, - "state": self.app_state.copy(), - } - - request_complete = False - response_started = False - response_complete: anyio.Event - raw_kwargs: typing.Dict[str, typing.Any] = {"stream": io.BytesIO()} - template = None - context = None - - async def receive() -> Message: - nonlocal request_complete - - if request_complete: - if not response_complete.is_set(): - await response_complete.wait() - return {"type": "http.disconnect"} - - body = request.read() - if isinstance(body, str): - body_bytes: bytes = body.encode("utf-8") # pragma: no cover - elif body is None: - body_bytes = b"" # pragma: no cover - elif isinstance(body, GeneratorType): - try: # pragma: no cover - chunk = body.send(None) - if isinstance(chunk, str): - chunk = chunk.encode("utf-8") - return {"type": "http.request", "body": chunk, "more_body": True} - except StopIteration: # pragma: no cover - request_complete = True - return {"type": "http.request", "body": b""} - else: - body_bytes = body - - request_complete = True - return {"type": "http.request", "body": body_bytes} - - async def send(message: Message) -> None: - nonlocal raw_kwargs, response_started, template, context - - if message["type"] == "http.response.start": - assert ( - not response_started - ), 'Received multiple "http.response.start" messages.' - raw_kwargs["status_code"] = message["status"] - raw_kwargs["headers"] = [ - (key.decode(), value.decode()) - for key, value in message.get("headers", []) - ] - response_started = True - elif message["type"] == "http.response.body": - assert ( - response_started - ), 'Received "http.response.body" without "http.response.start".' - assert ( - not response_complete.is_set() - ), 'Received "http.response.body" after response completed.' 
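-                # Buffer each "http.response.body" chunk (not written for HEAD requests) and mark the response complete once a chunk arrives with more_body=False.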
- body = message.get("body", b"") - more_body = message.get("more_body", False) - if request.method != "HEAD": - raw_kwargs["stream"].write(body) - if not more_body: - raw_kwargs["stream"].seek(0) - response_complete.set() - elif message["type"] == "http.response.debug": - template = message["info"]["template"] - context = message["info"]["context"] - - try: - with self.portal_factory() as portal: - response_complete = portal.call(anyio.Event) - portal.call(self.app, scope, receive, send) - except BaseException as exc: - if self.raise_server_exceptions: - raise exc - - if self.raise_server_exceptions: - assert response_started, "TestClient did not receive any response." - elif not response_started: - raw_kwargs = { - "status_code": 500, - "headers": [], - "stream": io.BytesIO(), - } - - raw_kwargs["stream"] = httpx.ByteStream(raw_kwargs["stream"].read()) - - response = httpx.Response(**raw_kwargs, request=request) - if template is not None: - response.template = template # type: ignore[attr-defined] - response.context = context # type: ignore[attr-defined] - return response - - -class TestClient(httpx.Client): - __test__ = False - task: "Future[None]" - portal: typing.Optional[anyio.abc.BlockingPortal] = None - - def __init__( - self, - app: ASGIApp, - base_url: str = "http://testserver", - raise_server_exceptions: bool = True, - root_path: str = "", - backend: str = "asyncio", - backend_options: typing.Optional[typing.Dict[str, typing.Any]] = None, - cookies: httpx._client.CookieTypes = None, - headers: typing.Dict[str, str] = None, - ) -> None: - self.async_backend = _AsyncBackend( - backend=backend, backend_options=backend_options or {} - ) - if _is_asgi3(app): - app = typing.cast(ASGI3App, app) - asgi_app = app - else: - app = typing.cast(ASGI2App, app) # type: ignore[assignment] - asgi_app = _WrapASGI2(app) # type: ignore[arg-type] - self.app = asgi_app - self.app_state: typing.Dict[str, typing.Any] = {} - transport = _TestClientTransport( - self.app, - portal_factory=self._portal_factory, - raise_server_exceptions=raise_server_exceptions, - root_path=root_path, - app_state=self.app_state, - ) - if headers is None: - headers = {} - headers.setdefault("user-agent", "testclient") - super().__init__( - app=self.app, - base_url=base_url, - headers=headers, - transport=transport, - follow_redirects=True, - cookies=cookies, - ) - - @contextlib.contextmanager - def _portal_factory(self) -> typing.Generator[anyio.abc.BlockingPortal, None, None]: - if self.portal is not None: - yield self.portal - else: - with anyio.from_thread.start_blocking_portal( - **self.async_backend - ) as portal: - yield portal - - def _choose_redirect_arg( - self, - follow_redirects: typing.Optional[bool], - allow_redirects: typing.Optional[bool], - ) -> typing.Union[bool, httpx._client.UseClientDefault]: - redirect: typing.Union[ - bool, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT - if allow_redirects is not None: - message = ( - "The `allow_redirects` argument is deprecated. " - "Use `follow_redirects` instead." - ) - warnings.warn(message, DeprecationWarning) - redirect = allow_redirects - if follow_redirects is not None: - redirect = follow_redirects - elif allow_redirects is not None and follow_redirects is not None: - raise RuntimeError( # pragma: no cover - "Cannot use both `allow_redirects` and `follow_redirects`." 
- ) - return redirect - - def request( # type: ignore[override] - self, - method: str, - url: httpx._types.URLTypes, - *, - content: typing.Optional[httpx._types.RequestContent] = None, - data: typing.Optional[_RequestData] = None, - files: typing.Optional[httpx._types.RequestFiles] = None, - json: typing.Any = None, - params: typing.Optional[httpx._types.QueryParamTypes] = None, - headers: typing.Optional[httpx._types.HeaderTypes] = None, - cookies: typing.Optional[httpx._types.CookieTypes] = None, - auth: typing.Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - follow_redirects: typing.Optional[bool] = None, - allow_redirects: typing.Optional[bool] = None, - timeout: typing.Union[ - httpx._client.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, - ) -> httpx.Response: - url = self.base_url.join(url) - redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) - return super().request( - method, - url, - content=content, - data=data, # type: ignore[arg-type] - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - auth=auth, - follow_redirects=redirect, - timeout=timeout, - extensions=extensions, - ) - - def get( # type: ignore[override] - self, - url: httpx._types.URLTypes, - *, - params: typing.Optional[httpx._types.QueryParamTypes] = None, - headers: typing.Optional[httpx._types.HeaderTypes] = None, - cookies: typing.Optional[httpx._types.CookieTypes] = None, - auth: typing.Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - follow_redirects: typing.Optional[bool] = None, - allow_redirects: typing.Optional[bool] = None, - timeout: typing.Union[ - httpx._client.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, - ) -> httpx.Response: - redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) - return super().get( - url, - params=params, - headers=headers, - cookies=cookies, - auth=auth, - follow_redirects=redirect, - timeout=timeout, - extensions=extensions, - ) - - def options( # type: ignore[override] - self, - url: httpx._types.URLTypes, - *, - params: typing.Optional[httpx._types.QueryParamTypes] = None, - headers: typing.Optional[httpx._types.HeaderTypes] = None, - cookies: typing.Optional[httpx._types.CookieTypes] = None, - auth: typing.Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - follow_redirects: typing.Optional[bool] = None, - allow_redirects: typing.Optional[bool] = None, - timeout: typing.Union[ - httpx._client.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, - ) -> httpx.Response: - redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) - return super().options( - url, - params=params, - headers=headers, - cookies=cookies, - auth=auth, - follow_redirects=redirect, - timeout=timeout, - extensions=extensions, - ) - - def head( # type: ignore[override] - self, - url: httpx._types.URLTypes, - *, - params: typing.Optional[httpx._types.QueryParamTypes] = None, - headers: typing.Optional[httpx._types.HeaderTypes] = None, - cookies: typing.Optional[httpx._types.CookieTypes] = None, - auth: typing.Union[ - httpx._types.AuthTypes, 
httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - follow_redirects: typing.Optional[bool] = None, - allow_redirects: typing.Optional[bool] = None, - timeout: typing.Union[ - httpx._client.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, - ) -> httpx.Response: - redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) - return super().head( - url, - params=params, - headers=headers, - cookies=cookies, - auth=auth, - follow_redirects=redirect, - timeout=timeout, - extensions=extensions, - ) - - def post( # type: ignore[override] - self, - url: httpx._types.URLTypes, - *, - content: typing.Optional[httpx._types.RequestContent] = None, - data: typing.Optional[_RequestData] = None, - files: typing.Optional[httpx._types.RequestFiles] = None, - json: typing.Any = None, - params: typing.Optional[httpx._types.QueryParamTypes] = None, - headers: typing.Optional[httpx._types.HeaderTypes] = None, - cookies: typing.Optional[httpx._types.CookieTypes] = None, - auth: typing.Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - follow_redirects: typing.Optional[bool] = None, - allow_redirects: typing.Optional[bool] = None, - timeout: typing.Union[ - httpx._client.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, - ) -> httpx.Response: - redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) - return super().post( - url, - content=content, - data=data, # type: ignore[arg-type] - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - auth=auth, - follow_redirects=redirect, - timeout=timeout, - extensions=extensions, - ) - - def put( # type: ignore[override] - self, - url: httpx._types.URLTypes, - *, - content: typing.Optional[httpx._types.RequestContent] = None, - data: typing.Optional[_RequestData] = None, - files: typing.Optional[httpx._types.RequestFiles] = None, - json: typing.Any = None, - params: typing.Optional[httpx._types.QueryParamTypes] = None, - headers: typing.Optional[httpx._types.HeaderTypes] = None, - cookies: typing.Optional[httpx._types.CookieTypes] = None, - auth: typing.Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - follow_redirects: typing.Optional[bool] = None, - allow_redirects: typing.Optional[bool] = None, - timeout: typing.Union[ - httpx._client.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, - ) -> httpx.Response: - redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) - return super().put( - url, - content=content, - data=data, # type: ignore[arg-type] - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - auth=auth, - follow_redirects=redirect, - timeout=timeout, - extensions=extensions, - ) - - def patch( # type: ignore[override] - self, - url: httpx._types.URLTypes, - *, - content: typing.Optional[httpx._types.RequestContent] = None, - data: typing.Optional[_RequestData] = None, - files: typing.Optional[httpx._types.RequestFiles] = None, - json: typing.Any = None, - params: typing.Optional[httpx._types.QueryParamTypes] = None, - headers: typing.Optional[httpx._types.HeaderTypes] = None, - cookies: 
typing.Optional[httpx._types.CookieTypes] = None, - auth: typing.Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - follow_redirects: typing.Optional[bool] = None, - allow_redirects: typing.Optional[bool] = None, - timeout: typing.Union[ - httpx._client.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, - ) -> httpx.Response: - redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) - return super().patch( - url, - content=content, - data=data, # type: ignore[arg-type] - files=files, - json=json, - params=params, - headers=headers, - cookies=cookies, - auth=auth, - follow_redirects=redirect, - timeout=timeout, - extensions=extensions, - ) - - def delete( # type: ignore[override] - self, - url: httpx._types.URLTypes, - *, - params: typing.Optional[httpx._types.QueryParamTypes] = None, - headers: typing.Optional[httpx._types.HeaderTypes] = None, - cookies: typing.Optional[httpx._types.CookieTypes] = None, - auth: typing.Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - follow_redirects: typing.Optional[bool] = None, - allow_redirects: typing.Optional[bool] = None, - timeout: typing.Union[ - httpx._client.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx._client.USE_CLIENT_DEFAULT, - extensions: typing.Optional[typing.Dict[str, typing.Any]] = None, - ) -> httpx.Response: - redirect = self._choose_redirect_arg(follow_redirects, allow_redirects) - return super().delete( - url, - params=params, - headers=headers, - cookies=cookies, - auth=auth, - follow_redirects=redirect, - timeout=timeout, - extensions=extensions, - ) - - def websocket_connect( - self, url: str, subprotocols: typing.Sequence[str] = None, **kwargs: typing.Any - ) -> typing.Any: - url = urljoin("ws://testserver", url) - headers = kwargs.get("headers", {}) - headers.setdefault("connection", "upgrade") - headers.setdefault("sec-websocket-key", "testserver==") - headers.setdefault("sec-websocket-version", "13") - if subprotocols is not None: - headers.setdefault("sec-websocket-protocol", ", ".join(subprotocols)) - kwargs["headers"] = headers - try: - super().request("GET", url, **kwargs) - except _Upgrade as exc: - session = exc.session - else: - raise RuntimeError("Expected WebSocket upgrade") # pragma: no cover - - return session - - def __enter__(self) -> "TestClient": - with contextlib.ExitStack() as stack: - self.portal = portal = stack.enter_context( - anyio.from_thread.start_blocking_portal(**self.async_backend) - ) - - @stack.callback - def reset_portal() -> None: - self.portal = None - - self.stream_send = StapledObjectStream( - *anyio.create_memory_object_stream(math.inf) - ) - self.stream_receive = StapledObjectStream( - *anyio.create_memory_object_stream(math.inf) - ) - self.task = portal.start_task_soon(self.lifespan) - portal.call(self.wait_startup) - - @stack.callback - def wait_shutdown() -> None: - portal.call(self.wait_shutdown) - - self.exit_stack = stack.pop_all() - - return self - - def __exit__(self, *args: typing.Any) -> None: - self.exit_stack.close() - - async def lifespan(self) -> None: - scope = {"type": "lifespan", "state": self.app_state} - try: - await self.app(scope, self.stream_receive.receive, self.stream_send.send) - finally: - await self.stream_send.send(None) - - async def wait_startup(self) -> None: - await self.stream_receive.send({"type": "lifespan.startup"}) - 
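-        # Wait for the app to acknowledge startup; receiving None means the lifespan task already exited, and task.result() re-raises its exception.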
- async def receive() -> typing.Any: - message = await self.stream_send.receive() - if message is None: - self.task.result() - return message - - message = await receive() - assert message["type"] in ( - "lifespan.startup.complete", - "lifespan.startup.failed", - ) - if message["type"] == "lifespan.startup.failed": - await receive() - - async def wait_shutdown(self) -> None: - async def receive() -> typing.Any: - message = await self.stream_send.receive() - if message is None: - self.task.result() - return message - - async with self.stream_send: - await self.stream_receive.send({"type": "lifespan.shutdown"}) - message = await receive() - assert message["type"] in ( - "lifespan.shutdown.complete", - "lifespan.shutdown.failed", - ) - if message["type"] == "lifespan.shutdown.failed": - await receive() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/uvicorn/lifespan/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/uvicorn/lifespan/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/pyInter/Liyuu_sovits4/data_utils.py b/spaces/pyInter/Liyuu_sovits4/data_utils.py deleted file mode 100644 index bd67adc7d42da7b9ff4ca11e543d8cc9cd34e60b..0000000000000000000000000000000000000000 --- a/spaces/pyInter/Liyuu_sovits4/data_utils.py +++ /dev/null @@ -1,142 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import modules.commons as commons -import utils -from modules.mel_processing import spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text - -# import h5py - - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths, hparams): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.spec_len = hparams.train.max_speclen - self.spk_map = hparams.spk - - random.seed(1234) - random.shuffle(self.audiopaths) - - def get_audio(self, filename): - filename = filename.replace("\\", "/") - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split("/")[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - f0 = np.load(filename + ".f0.npy") - f0, uv = utils.interpolate_f0(f0) - f0 = torch.FloatTensor(f0) - uv = torch.FloatTensor(uv) - - c = torch.load(filename+ ".soft.pt") - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[0]) - - - lmin = min(c.size(-1), spec.size(-1)) - assert abs(c.size(-1) - spec.size(-1)) < 3, (c.size(-1), spec.size(-1), f0.shape, filename) - assert abs(audio_norm.shape[1]-lmin * self.hop_length) < 3 * self.hop_length - spec, c, f0, uv = spec[:, :lmin], c[:, :lmin], f0[:lmin], uv[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - if spec.shape[1] < 60: - print("skip too short audio:", filename) - return None - if spec.shape[1] > 800: - start = random.randint(0, spec.shape[1]-800) - end = start + 790 - spec, c, f0, uv = spec[:, start:end], c[:, start:end], f0[start:end], uv[start:end] - audio_norm = audio_norm[:, start * self.hop_length : end * self.hop_length] - - return c, f0, spec, audio_norm, spk, uv - - def __getitem__(self, index): - return self.get_audio(self.audiopaths[index][0]) - - def __len__(self): - return len(self.audiopaths) - - -class TextAudioCollate: - - def __call__(self, batch): - batch = [b for b in batch if b is not None] - - input_lengths, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].shape[1] for x in batch]), - dim=0, descending=True) - - max_c_len = max([x[0].size(1) for x in batch]) - max_wav_len = max([x[3].size(1) for x in batch]) - - lengths = torch.LongTensor(len(batch)) - - c_padded = torch.FloatTensor(len(batch), batch[0][0].shape[0], max_c_len) - f0_padded = torch.FloatTensor(len(batch), max_c_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][2].shape[0], max_c_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - spkids = torch.LongTensor(len(batch), 1) - uv_padded = torch.FloatTensor(len(batch), max_c_len) - - c_padded.zero_() - spec_padded.zero_() - f0_padded.zero_() - wav_padded.zero_() - uv_padded.zero_() - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - c = row[0] - c_padded[i, :, :c.size(1)] = c - lengths[i] = c.size(1) - - f0 = row[1] - f0_padded[i, :f0.size(0)] = f0 - - spec = row[2] - spec_padded[i, :, :spec.size(1)] = spec - - wav = 
row[3] - wav_padded[i, :, :wav.size(1)] = wav - - spkids[i, 0] = row[4] - - uv = row[5] - uv_padded[i, :uv.size(0)] = uv - - return c_padded, f0_padded, spec_padded, wav_padded, spkids, lengths, uv_padded diff --git a/spaces/pycoming/bingo/src/components/chat-header.tsx b/spaces/pycoming/bingo/src/components/chat-header.tsx deleted file mode 100644 index c6664b8dee61179f844d45c5bd650518fc2cb4c2..0000000000000000000000000000000000000000 --- a/spaces/pycoming/bingo/src/components/chat-header.tsx +++ /dev/null @@ -1,12 +0,0 @@ -import LogoIcon from '@/assets/images/logo.svg' -import Image from 'next/image' - -export function ChatHeader() { - return ( -
-      <Image alt="logo" src={LogoIcon} />
-      <div>欢迎使用新必应</div>
-      <div>由 AI 支持的网页版 Copilot</div>
    - ) -} diff --git a/spaces/pycui/RealChar/client/web/src/components/MediaDevices/style.css b/spaces/pycui/RealChar/client/web/src/components/MediaDevices/style.css deleted file mode 100644 index 3d4dd479e71f57abd63bdfbcb042eb4fc9a5fd5b..0000000000000000000000000000000000000000 --- a/spaces/pycui/RealChar/client/web/src/components/MediaDevices/style.css +++ /dev/null @@ -1,54 +0,0 @@ -.devices-container { - display: flex; - align-items: center; - justify-content: center; - flex-direction: column; -} - -.audio-device-label { - margin-top: 40px; - margin-bottom: 20px; - color: #e0e0e0; -} - -/* media devices select */ -.select-dropdown, -.select-dropdown * { - margin: 0; - padding: 0; - position: relative; - box-sizing: border-box; -} -.select-dropdown { - position: relative; - background-color: #02081d; - border-radius: 4px; -} -.select-dropdown select { - font-size: 1rem; - font-weight: normal; - color: white; - max-width: 100%; - padding: 8px 24px 8px 10px; - border-radius: 10px; - background-color: transparent; - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; -} -.select-dropdown select:active, .select-dropdown select:focus { - outline: none; - box-shadow: none; -} -.select-dropdown:after { - content: ""; - position: absolute; - top: 50%; - right: 8px; - width: 0; - height: 0; - margin-top: -2px; - border-top: 5px solid #aaa; - border-right: 5px solid transparent; - border-left: 5px solid transparent; -} diff --git a/spaces/qingxu98/academic-chatgpt-beta/docs/README_FR.md b/spaces/qingxu98/academic-chatgpt-beta/docs/README_FR.md deleted file mode 100644 index f21e90035ef2ddea91382155e0ad46b6740f5322..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/academic-chatgpt-beta/docs/README_FR.md +++ /dev/null @@ -1,296 +0,0 @@ -> **Note** -> -> Ce fichier README est généré automatiquement par le plugin de traduction markdown de ce projet et n'est peut - être pas correct à 100%. -> - -# ChatGPT Optimisation Académique - -**Si vous aimez ce projet, donnez-lui une étoile; si vous avez inventé des raccourcis académiques plus utiles ou des plugins fonctionnels, n'hésitez pas à ouvrir une demande ou une demande de traction. Nous avons également un fichier README en [anglais|](docs/README_EN.md)[japonais|](docs/README_JP.md)[russe|](docs/README_RS.md)[français](docs/README_FR.md) traduit par ce projet lui-même.** - -> **Note** -> -> 1. Veuillez noter que seuls les plugins de fonction signalés en **rouge** sont capables de lire les fichiers, certains plugins se trouvent dans le **menu déroulant** de la section plugin. Nous sommes également les bienvenus avec la plus haute priorité pour traiter et accepter tout nouveau PR de plugin! -> -> 2. Chaque fichier dans ce projet est expliqué en détail dans l'auto-analyse [self_analysis.md](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Avec l'itération des versions, vous pouvez également cliquer sur les plugins fonctionnels pertinents pour appeler GPT et générer un rapport d'auto-analyse projet mis à jour. Les questions fréquemment posées sont résumées dans le [wiki](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). -> - -
    - -Fonctionnalité | Description ---- | --- -Polissage en un clic | Prend en charge la correction en un clic et la recherche d'erreurs de syntaxe dans les documents de recherche. -Traduction Chinois-Anglais en un clic | Une touche pour traduire la partie chinoise en anglais ou celle anglaise en chinois. -Explication de code en un clic | Affiche et explique correctement le code. -[Raccourcis clavier personnalisables](https://www.bilibili.com/video/BV14s4y1E7jN) | Prend en charge les raccourcis clavier personnalisables. -[Configuration du serveur proxy](https://www.bilibili.com/video/BV1rc411W7Dr) | Prend en charge la configuration du serveur proxy. -Conception modulaire | Prend en charge la personnalisation des plugins de fonctions et des [plugins] de fonctions hiérarchiques personnalisés, et les plugins prennent en charge [la mise à jour à chaud](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). -[Auto-analyse du programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugins] [Lire en un clic](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) le code source de ce projet. -[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugins] En un clic, les projets Python/C/C++/Java/Lua/... peuvent être analysés. -Lire le document de recherche | [Plugins] Lisez le résumé de l'article en latex et générer un résumé. -Traduction et polissage de l'article complet en LaTeX | [Plugins] Une touche pour traduire ou corriger en LaTeX -Génération Commentaire de fonction en vrac | [Plugins] Lisez en un clic les fonctions et générez des commentaires de fonction. -Rapport d'analyse automatique des chats générés | [Plugins] Génère un rapport de synthèse après l'exécution. -[Assistant arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugins] Entrez l'url de l'article arxiv pour traduire le résumé + télécharger le PDF en un clic -[Traduction complète des articles PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugins] Extraire le titre et le résumé de l'article PDF + Traduire le texte entier (multithread) -[Aide à la recherche Google Academ](https://www.bilibili.com/video/BV19L411U7ia) | [Plugins] Donnez à GPT l'URL de n'importe quelle page de recherche Google Academ pour vous aider à sélectionner des articles intéressants -Affichage de formules/images/tableaux | Afficher la forme traduite et rendue d'une formule en même temps, plusieurs formules et surlignage du code prend en charge -Prise en charge des plugins multithread | Prise en charge de l'appel multithread de chatgpt, traitement en masse de texte ou de programmes en un clic -Activer le thème Gradio sombre [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) au démarrage | Ajoutez ```/?__dark-theme=true``` à l'URL du navigateur pour basculer vers le thème sombre -[Prise en charge de plusieurs modèles LLM](https://www.bilibili.com/video/BV1wT411p7yf), [prise en charge de l'interface API2D](https://api2d.com/) | Comment cela serait-il de se faire servir par GPT3.5, GPT4 et la [ChatGLM de Tsinghua](https://github.com/THUDM/ChatGLM-6B) en même temps? -Expérience en ligne d'huggingface sans science | Après vous être connecté à huggingface, copiez [cet espace](https://huggingface.co/spaces/qingxu98/gpt-academic) -... | ... - -
- -- Nouvelle interface (modifiable en modifiant l'option de mise en page dans config.py pour basculer entre les mises en page gauche-droite et haut-bas) -
    - - -- Tous les boutons sont générés dynamiquement en lisant functional.py, les utilisateurs peuvent ajouter librement des fonctions personnalisées pour libérer le presse-papiers. -
    - -- Correction/amélioration -
- -- Si la sortie contient des formules, elles seront affichées simultanément sous forme de texte brut et de forme rendue pour faciliter la copie et la lecture. -
    - -- Pas envie de lire le code du projet ? Faites votre propre démo avec ChatGPT. -
    - -- Utilisation combinée de plusieurs modèles de langage sophistiqués (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
    - -Utilisation combinée de plusieurs modèles de langage sophistiqués en version de test [huggingface](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (la version huggingface ne prend pas en charge Chatglm). - - ---- - -## Installation - Méthode 1 : Exécution directe (Windows, Linux or MacOS) - -1. Téléchargez le projet -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. Configuration de l'API_KEY et des paramètres de proxy - -Dans `config.py`, configurez les paramètres de proxy et de clé d'API OpenAI, comme indiqué ci-dessous -``` -1. Si vous êtes en Chine, vous devez configurer un proxy étranger pour utiliser l'API OpenAI en toute transparence. Pour ce faire, veuillez lire attentivement le fichier config.py (1. Modifiez l'option USE_PROXY ; 2. Modifiez les paramètres de proxies comme indiqué dans les instructions). -2. Configurez votre clé API OpenAI. Vous devez vous inscrire sur le site web d'OpenAI pour obtenir une clé API. Une fois que vous avez votre clé API, vous pouvez la configurer dans le fichier config.py. -3. Tous les problèmes liés aux réseaux de proxy (temps d'attente, non-fonctionnement des proxies) sont résumés dans https://github.com/binary-husky/chatgpt_academic/issues/1. -``` -(Remarque : le programme vérifie d'abord s'il existe un fichier de configuration privé nommé `config_private.py`, et utilise les configurations de celui-ci à la place de celles du fichier `config.py`. Par conséquent, si vous comprenez notre logique de lecture de configuration, nous vous recommandons fortement de créer un nouveau fichier de configuration nommé `config_private.py` à côté de `config.py` et de transférer (copier) les configurations de celui-ci dans `config_private.py`. `config_private.py` n'est pas contrôlé par git et rend vos informations personnelles plus sûres.) - -3. Installation des dépendances -```sh -# (Option 1) Recommandé -python -m pip install -r requirements.txt - -# (Option 2) Si vous utilisez anaconda, les étapes sont similaires : -# (Option 2.1) conda create -n gptac_venv python=3.11 -# (Option 2.2) conda activate gptac_venv -# (Option 2.3) python -m pip install -r requirements.txt - -# note : Utilisez la source pip officielle ou la source pip Alibaba. D'autres sources (comme celles des universités) pourraient poser problème. Pour utiliser temporairement une autre source, utilisez : -# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -``` - -Si vous avez besoin de soutenir ChatGLM de Tsinghua, vous devez installer plus de dépendances (si vous n'êtes pas familier avec Python ou que votre ordinateur n'est pas assez performant, nous vous recommandons de ne pas essayer) : -```sh -python -m pip install -r request_llm/requirements_chatglm.txt -``` - -4. Exécution -```sh -python main.py -``` - -5. Tester les plugins de fonctions -``` -- Test Python Project Analysis - Dans la zone de saisie, entrez `./crazy_functions/test_project/python/dqn`, puis cliquez sur "Parse Entire Python Project" -- Test d'auto-lecture du code - Cliquez sur "[Démo multi-thread] Parser ce projet lui-même (auto-traduction de la source)" -- Test du modèle de fonctionnalité expérimentale (exige une réponse de l'IA à ce qui est arrivé aujourd'hui dans l'histoire). Vous pouvez utiliser cette fonctionnalité comme modèle pour des fonctions plus complexes. 
- Cliquez sur "[Démo modèle de plugin de fonction] Histoire du Jour" -- Le menu déroulant de la zone de plugin de fonctionnalité contient plus de fonctionnalités à sélectionner. -``` - -## Installation - Méthode 2 : Utilisation de docker (Linux) - - -Vous êtes un traducteur professionnel d'articles académiques en français. - -1. ChatGPT seul (recommandé pour la plupart des gens) -``` sh -# Télécharger le projet -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# Configurer le proxy outre-mer et la clé API OpenAI -Modifier le fichier config.py avec n'importe quel éditeur de texte -# Installer -docker build -t gpt-academic . -# Exécuter -docker run --rm -it --net=host gpt-academic - -# Tester les modules de fonction -## Tester la fonction modèle des modules (requiert la réponse de GPT à "qu'est-ce qui s'est passé dans l'histoire aujourd'hui ?"), vous pouvez utiliser cette fonction en tant que modèle pour implémenter des fonctions plus complexes. -Cliquez sur "[Exemple de modèle de module] Histoire d'aujourd'hui" -## Tester le résumé écrit pour le projet LaTeX -Dans la zone de saisie, tapez ./crazy_functions/test_project/latex/attention, puis cliquez sur "Lire le résumé de l'article de recherche LaTeX" -## Tester l'analyse du projet Python -Dans la zone de saisie, tapez ./crazy_functions/test_project/python/dqn, puis cliquez sur "Analyser l'ensemble du projet Python" - -D'autres fonctions sont disponibles dans la liste déroulante des modules de fonction. -``` - -2. ChatGPT+ChatGLM (nécessite une grande connaissance de docker et une configuration informatique suffisamment puissante) -``` sh -# Modifier le dockerfile -cd docs && nano Dockerfile+ChatGLM -# Comment construire | 如何构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs) -docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM . -# Comment exécuter | 如何运行 (1) Directement exécuter : -docker run --rm -it --net=host --gpus=all gpt-academic -# Comment exécuter | 如何运行 (2) Je veux effectuer quelques ajustements dans le conteneur avant de lancer : -docker run --rm -it --net=host --gpus=all gpt-academic bash -``` - -## Installation - Méthode 3 : Autres méthodes de déploiement - -1. Déploiement sur un cloud serveur distant -Veuillez consulter le [wiki de déploiement-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -2. Utilisation de WSL2 (Windows Subsystem for Linux) -Veuillez consulter le [wiki de déploiement-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - -## Configuration de la procuration de l'installation -### Méthode 1 : Méthode conventionnelle -[Configuration de la procuration](https://github.com/binary-husky/chatgpt_academic/issues/1) - -### Méthode 2 : Tutoriel pour débutant pur -[Tutoriel pour débutant pur](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89) - - ---- - -## Personnalisation des nouveaux boutons pratiques (personnalisation des raccourcis académiques) -Ouvrez le fichier `core_functional.py` avec n'importe quel éditeur de texte, ajoutez les éléments suivants, puis redémarrez le programme. 
(Si le bouton a déjà été ajouté avec succès et est visible, le préfixe et le suffixe pris en charge peuvent être modifiés à chaud sans avoir besoin de redémarrer le programme.) -Par exemple: -``` -"Traduction Français-Chinois": { - # Préfixe, qui sera ajouté avant votre saisie. Par exemple, pour décrire votre demande, telle que la traduction, le débogage de code, l'amélioration, etc. - "Prefix": "Veuillez traduire le contenu ci-dessous en chinois, puis expliquer chaque terme propre mentionné dans un tableau Markdown :\n\n", - - # Suffixe, qui sera ajouté après votre saisie. Par exemple, en combinaison avec un préfixe, vous pouvez mettre le contenu de votre saisie entre guillemets. - "Suffix": "", -}, -``` - -
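À titre d'illustration uniquement (il ne s'agit pas du code réel du projet) : une esquisse minimale en Python montrant comment un tel préfixe et suffixe peuvent être appliqués à la saisie de l'utilisateur avant l'envoi au modèle. Le nom `appliquer_bouton` est hypothétique.

```python
# Esquisse hypothétique : le préfixe et le suffixe encadrent simplement la saisie de l'utilisateur.
def appliquer_bouton(saisie_utilisateur: str, bouton: dict) -> str:
    return bouton.get("Prefix", "") + saisie_utilisateur + bouton.get("Suffix", "")


bouton = {
    "Prefix": "Veuillez traduire le contenu ci-dessous en chinois :\n\n",
    "Suffix": "",
}
print(appliquer_bouton("Bonjour le monde", bouton))
```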
    - ---- - - -## Présentation de certaines fonctionnalités - -### Affichage des images: - -
    - - -### Si un programme peut comprendre et décomposer lui-même : - -
    - - -### Analyse de tout projet Python/Cpp quelconque : -
    - -### Lecture et résumé générés automatiquement pour les articles en Latex -
    - -### Génération de rapports automatique -
    - -### Conception de fonctionnalités modulaires -
    - - -### Traduction de code source en anglais - -
    - -## À faire et planification de version : -- version 3.2+ (à faire) : Prise en charge de plus de paramètres d'interface de plugin de fonction -- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles GPT ! Prise en charge de l'API2d, prise en charge de la répartition de charge de plusieurs clés API -- version 3.0 : Prise en charge de chatglm et d'autres petits llm -- version 2.6 : Réorganisation de la structure du plugin, amélioration de l'interactivité, ajout de plus de plugins -- version 2.5 : Mise à jour automatique, résolution du problème de dépassement de jeton et de texte trop long lors de la compilation du code source complet -- version 2.4 : (1) Ajout de la fonctionnalité de traduction intégrale de PDF ; (2) Ajout d'une fonctionnalité de changement de position de zone de saisie ; (3) Ajout d'une option de disposition verticale ; (4) Optimisation du plugin de fonction multi-thread. -- version 2.3 : Amélioration de l'interactivité multi-thread -- version 2.2 : Prise en charge du rechargement à chaud du plugin de fonction -- version 2.1 : Mise en page pliable -- version 2.0 : Introduction du plugin de fonction modulaire -- version 1.0 : Fonctionnalité de base - -## Références et apprentissage - -``` -De nombreux designs d'autres projets exceptionnels ont été utilisés pour référence dans le code, notamment : - -# Projet 1 : De nombreuses astuces ont été empruntées à ChuanhuChatGPT -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Projet 2 : ChatGLM-6B de Tsinghua : -https://github.com/THUDM/ChatGLM-6B -``` - diff --git a/spaces/qingxu98/gpt-academic/config.py b/spaces/qingxu98/gpt-academic/config.py deleted file mode 100644 index fb4dab8128aa5dc3c8cd0bb000e7b06534cab6fd..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/config.py +++ /dev/null @@ -1,270 +0,0 @@ -""" - 以下所有配置也都支持利用环境变量覆写,环境变量配置格式见docker-compose.yml。 - 读取优先级:环境变量 > config_private.py > config.py - --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- - All the following configurations also support using environment variables to override, - and the environment variable configuration format can be seen in docker-compose.yml. 
- Configuration reading priority: environment variable > config_private.py > config.py -""" - -# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项 -API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4" - - -# [step 1]>> API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789"。极少数情况下,还需要填写组织(格式如org-123456789abcdefghijklmno的),请向下翻,找 API_ORG 设置项 -API_KEY = "此处填API密钥" # 可同时填写多个API-KEY,用英文逗号分割,例如API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4" - - -# [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改;如果使用本地或无地域限制的大模型时,此处也不需要修改 -USE_PROXY = False -if USE_PROXY: - """ - 填写格式是 [协议]:// [地址] :[端口],填写之前不要忘记把USE_PROXY改成True,如果直接在海外服务器部署,此处不修改 - <配置教程&视频教程> https://github.com/binary-husky/gpt_academic/issues/1> - [协议] 常见协议无非socks5h/http; 例如 v2**y 和 ss* 的默认本地协议是socks5h; 而cl**h 的默认本地协议是http - [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上) - [端口] 在代理软件的设置里找。虽然不同的代理软件界面不一样,但端口号都应该在最显眼的位置上 - """ - # 代理网络的地址,打开你的*学*网软件查看代理的协议(socks5h / http)、地址(localhost)和端口(11284) - proxies = { - # [协议]:// [地址] :[端口] - "http": "socks5h://localhost:11284", # 再例如 "http": "http://127.0.0.1:7890", - "https": "socks5h://localhost:11284", # 再例如 "https": "http://127.0.0.1:7890", - } -else: - proxies = None - -# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------ - -# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!) -# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"} -# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"} -API_URL_REDIRECT = {} - - -# 多线程函数插件中,默认允许多少路线程同时访问OpenAI。Free trial users的限制是每分钟3次,Pay-as-you-go users的限制是每分钟3500次 -# 一言以蔽之:免费(5刀)用户填3,OpenAI绑了信用卡的用户可以填 16 或者更高。提高限制请查询:https://platform.openai.com/docs/guides/rate-limits/overview -DEFAULT_WORKER_NUM = 3 - - -# 色彩主题, 可选 ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast"] -# 更多主题, 请查阅Gradio主题商店: https://huggingface.co/spaces/gradio/theme-gallery 可选 ["Gstaff/Xkcd", "NoCrypt/Miku", ...] -THEME = "Chuanhu-Small-and-Beautiful" -AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"] - - -# 对话窗的高度 (仅在LAYOUT="TOP-DOWN"时生效) -CHATBOT_HEIGHT = 1115 - - -# 代码高亮 -CODE_HIGHLIGHT = True - - -# 窗口布局 -LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布局) - - -# 暗色模式 / 亮色模式 -DARK_MODE = True - - -# 发送请求到OpenAI后,等待多久判定为超时 -TIMEOUT_SECONDS = 30 - - -# 网页的端口, -1代表随机端口 -WEB_PORT = -1 - - -# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制 -MAX_RETRY = 2 - -# OpenAI模型选择是(gpt4现在只对申请成功的人开放) -LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm" -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo", "spark", "azure-gpt-3.5"] - -# 插件分类默认选项 -DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体'] - - -# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 ) -LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", - "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"] -# P.S. 
其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", -# "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] - - -# 百度千帆(LLM_MODEL="qianfan") -BAIDU_CLOUD_API_KEY = '' -BAIDU_CLOUD_SECRET_KEY = '' -BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # 可选 "ERNIE-Bot"(文心一言), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat" - - -# 如果使用ChatGLM2微调模型,请把 LLM_MODEL="chatglmft",并在此处指定模型路径 -CHATGLM_PTUNING_CHECKPOINT = "" # 例如"/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100" - - -# 本地LLM模型如ChatGLM的执行方式 CPU/GPU -LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda" -LOCAL_MODEL_QUANT = "FP16" # 默认 "FP16" "INT4" 启用量化INT4版本 "INT8" 启用量化INT8版本 - - -# 设置gradio的并行线程数(不需要修改) -CONCURRENT_COUNT = 100 - - -# 是否在提交时自动清空输入框 -AUTO_CLEAR_TXT = False - - -# 加一个live2d装饰 -ADD_WAIFU = False - - -# 设置用户名和密码(不需要修改)(相关功能不稳定,与gradio版本和网络都相关,如果本地使用不建议加这个) -# [("username", "password"), ("username2", "password2"), ...] -AUTHENTICATION = [] - - -# 如果需要在二级路径下运行(常规情况下,不要修改!!)(需要配合修改main.py才能生效!) -CUSTOM_PATH = "/" - - -# 极少数情况下,openai的官方KEY需要伴随组织编码(格式如org-xxxxxxxxxxxxxxxxxxxxxxxx)使用 -API_ORG = "" - - -# 如果需要使用Slack Claude,使用教程详情见 request_llm/README.md -SLACK_CLAUDE_BOT_ID = '' -SLACK_CLAUDE_USER_TOKEN = '' - - -# 如果需要使用AZURE 详情请见额外文档 docs\use_azure.md -AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/" -AZURE_API_KEY = "填入azure openai api的密钥" # 建议直接在API_KEY处填写,该选项即将被弃用 -AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md - - -# 使用Newbing -NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"] -NEWBING_COOKIES = """ -put your new bing cookies here -""" - - -# 阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md -ENABLE_AUDIO = False -ALIYUN_TOKEN="" # 例如 f37f30e0f9934c34a992f6f64f7eba4f -ALIYUN_APPKEY="" # 例如 RoPlZrM88DnAFkZK -ALIYUN_ACCESSKEY="" # (无需填写) -ALIYUN_SECRET="" # (无需填写) - - -# 接入讯飞星火大模型 https://console.xfyun.cn/services/iat -XFYUN_APPID = "00000000" -XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" -XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - - -# Claude API KEY -ANTHROPIC_API_KEY = "" - - -# 自定义API KEY格式 -CUSTOM_API_KEY_PATTERN = "" - - -# HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens -HUGGINGFACE_ACCESS_TOKEN = "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" - - -# GROBID服务器地址(填写多个可以均衡负载),用于高质量地读取PDF文档 -# 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space" -GROBID_URLS = [ - "https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space", - "https://shaocongma-grobid.hf.space","https://FBR123-grobid.hf.space", "https://yeku-grobid.hf.space", -] - - -# 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭 -ALLOW_RESET_CONFIG = False - - -# 临时的上传文件夹位置,请勿修改 -PATH_PRIVATE_UPLOAD = "private_upload" - - -# 日志文件夹的位置,请勿修改 -PATH_LOGGING = "gpt_log" - - -# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改 -WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules"] - - -""" -在线大模型配置关联关系示意图 -│ -├── "gpt-3.5-turbo" 等openai模型 -│ ├── API_KEY -│ ├── CUSTOM_API_KEY_PATTERN(不常用) -│ ├── API_ORG(不常用) -│ └── API_URL_REDIRECT(不常用) -│ -├── "azure-gpt-3.5" 等azure模型 -│ ├── API_KEY -│ ├── AZURE_ENDPOINT -│ ├── AZURE_API_KEY -│ ├── AZURE_ENGINE -│ └── API_URL_REDIRECT -│ -├── "spark" 星火认知大模型 spark & sparkv2 -│ ├── XFYUN_APPID -│ ├── XFYUN_API_SECRET -│ └── XFYUN_API_KEY -│ 
-├── "claude-1-100k" 等claude模型 -│ └── ANTHROPIC_API_KEY -│ -├── "stack-claude" -│ ├── SLACK_CLAUDE_BOT_ID -│ └── SLACK_CLAUDE_USER_TOKEN -│ -├── "qianfan" 百度千帆大模型库 -│ ├── BAIDU_CLOUD_QIANFAN_MODEL -│ ├── BAIDU_CLOUD_API_KEY -│ └── BAIDU_CLOUD_SECRET_KEY -│ -├── "newbing" Newbing接口不再稳定,不推荐使用 - ├── NEWBING_STYLE - └── NEWBING_COOKIES - - -用户图形界面布局依赖关系示意图 -│ -├── CHATBOT_HEIGHT 对话窗的高度 -├── CODE_HIGHLIGHT 代码高亮 -├── LAYOUT 窗口布局 -├── DARK_MODE 暗色模式 / 亮色模式 -├── DEFAULT_FN_GROUPS 插件分类默认选项 -├── THEME 色彩主题 -├── AUTO_CLEAR_TXT 是否在提交时自动清空输入框 -├── ADD_WAIFU 加一个live2d装饰 -├── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性 - - -插件在线服务配置依赖关系示意图 -│ -├── 语音功能 -│ ├── ENABLE_AUDIO -│ ├── ALIYUN_TOKEN -│ ├── ALIYUN_APPKEY -│ ├── ALIYUN_ACCESSKEY -│ └── ALIYUN_SECRET -│ -├── PDF文档精准解析 -│ └── GROBID_URLS - -""" diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Acca Certus Crack Trial 45.md b/spaces/quidiaMuxgu/Expedit-SAM/Acca Certus Crack Trial 45.md deleted file mode 100644 index 5a768ac069f7c1a9bd4ea0fe62863ca3808d714e..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Acca Certus Crack Trial 45.md +++ /dev/null @@ -1,65 +0,0 @@ - -

    Acca Certus Crack Trial 45: What You Need to Know

    -

If you are looking for software that can help you with construction site safety plans, bills of quantities and works accounting (sicurezza cantieri, computo metrico e contabilità lavori), and construction time-scheduling, you might have heard of Acca Certus. Acca Certus is a program developed by ACCA Software, a leading company in the field of architecture, engineering and construction software. It is designed to help you comply with workplace health and safety regulations and standards, as well as to optimize the management and control of your projects.

    -

However, Acca Certus is not cheap software. The full version can cost up to 2,000 euros, depending on the features and modules you need. That is why some people are tempted to look for a cracked version, such as Acca Certus Crack Trial 45: a build that claims to extend the 45-day trial period indefinitely and unlock all the features of the software.

    -

    acca certus crack trial 45


    Download File ··· https://geags.com/2uCrCs



    -

    Is Acca Certus Crack Trial 45 Worth It?

    -

    The short answer is no. Acca Certus Crack Trial 45 is not worth it, and here are some reasons why:

    -
      -
    • Acca Certus Crack Trial 45 is illegal. By using a crack version of Acca Certus, you are violating the intellectual property rights of ACCA Software and exposing yourself to legal consequences.
    • -
    • Acca Certus Crack Trial 45 is unsafe. By downloading and installing a crack version of Acca Certus, you are risking your computer's security and performance. You might end up with viruses, malware, spyware, or other harmful programs that can damage your system or steal your data.
    • -
    • Acca Certus Crack Trial 45 is unreliable. By using a crack version of Acca Certus, you are compromising the quality and accuracy of your work. You might encounter errors, bugs, crashes, or compatibility issues that can affect your productivity and results.
    • -
    • Acca Certus Crack Trial 45 is unethical. By using a crack version of Acca Certus, you are disrespecting the hard work and innovation of ACCA Software and its developers. You are also depriving yourself of the benefits and support that come with a legitimate version of Acca Certus.
    • -
    -

    What Are the Alternatives to Acca Certus Crack Trial 45?

    -

    If you want to use Acca Certus without breaking the law or compromising your work, you have some alternatives to Acca Certus Crack Trial 45:

    -
      -
    • You can use the free trial version of Acca Certus for 45 days. This will give you access to all the features and modules of Acca Certus for a limited time. You can use this opportunity to test the software and see if it meets your needs and expectations.
    • -
    • You can buy the full version of Acca Certus with a discount. ACCA Software offers various discounts and promotions for its products, depending on the season, the type of customer, or the number of licenses. You can check their website or contact their sales team for more information.
    • -
    • You can use other software that are similar to Acca Certus. There are many other software that can help you with software sicurezza cantieri, computo metrico e contabilit lavori, construction time-scheduling and accounting. Some examples are Edificius, PriMus, TermuS, or StruCalc. You can compare their features, prices, reviews, and ratings to find the best option for you.
    • -
    -

    Conclusion

    -

    Acca Certus is a powerful and professional software that can help you with software sicurezza cantieri, computo metrico e contabilit lavori, construction time-scheduling and accounting. However, using a crack version of Acca Certus, such as the Acca Certus Crack Trial 45, is not a good idea. It is illegal, unsafe, unreliable, and unethical. Instead, you should use the free trial version of Acca Certus for 45 days, buy the full version of Acca Certus with a discount, or use other software that are similar to Acca Certus.

    -

    How to Use Acca Certus Effectively

    -

    If you have decided to use Acca Certus for your projects, you might be wondering how to use it effectively. Here are some tips and best practices that can help you get the most out of Acca Certus:

    -
      -
    • Read the user manual and watch the video tutorials. Acca Certus comes with a comprehensive user manual and a series of video tutorials that can guide you through the installation, activation, configuration, and usage of the software. You can find them on the ACCA Software website or in the software itself.
    • -
    • Use the templates and wizards. Acca Certus offers various templates and wizards that can help you create your projects faster and easier. You can choose from different types of projects, such as residential, commercial, industrial, or public. You can also customize the templates and wizards according to your preferences and needs.
    • -
    • Use the integrated modules and features. Acca Certus integrates various modules and features that can help you with different aspects of your projects, such as design, analysis, calculation, verification, documentation, printing, and sharing. You can use them to optimize your workflow and improve your results.
    • -
    • Update the software regularly. ACCA Software constantly updates Acca Certus with new features, improvements, bug fixes, and compatibility with the latest regulations and standards. You can update the software automatically or manually from the software itself or from the ACCA Software website.
    • -
    • Contact the support team if you have any questions or issues. ACCA Software provides a dedicated support team that can assist you with any questions or issues you might have with Acca Certus. You can contact them by phone, email, chat, or ticket from the ACCA Software website or from the software itself.
    • -
    -

    Conclusion

    -

    Acca Certus is a powerful and professional software that can help you with software sicurezza cantieri, computo metrico e contabilit lavori, construction time-scheduling and accounting. However, using a crack version of Acca Certus, such as the Acca Certus Crack Trial 45, is not a good idea. It is illegal, unsafe, unreliable, and unethical. Instead, you should use the free trial version of Acca Certus for 45 days, buy the full version of Acca Certus with a discount, or use other software that are similar to Acca Certus. If you want to use Acca Certus effectively, you should read the user manual and watch the video tutorials, use the templates and wizards, use the integrated modules and features, update the software regularly, and contact the support team if you have any questions or issues.

    -

    -

    What Are the Benefits of Acca Certus

    -

    Acca Certus is not only a software that can help you with software sicurezza cantieri, computo metrico e contabilit lavori, construction time-scheduling and accounting. It is also a software that can bring you many benefits for your business and your career. Here are some of the benefits of Acca Certus:

    -
      -
    • Acca Certus can help you save time and money. By using Acca Certus, you can reduce the time and costs of your projects, as well as avoid errors, mistakes, and penalties. You can also increase your productivity and efficiency, as well as your profitability and competitiveness.
    • -
    • Acca Certus can help you improve your quality and accuracy. By using Acca Certus, you can ensure the quality and accuracy of your projects, as well as comply with the regulations and standards of safety and health at work. You can also improve your reputation and credibility, as well as your customer satisfaction and loyalty.
    • -
    • Acca Certus can help you expand your skills and knowledge. By using Acca Certus, you can learn new skills and knowledge related to software sicurezza cantieri, computo metrico e contabilit lavori, construction time-scheduling and accounting. You can also update your skills and knowledge with the latest features, improvements, and updates of Acca Certus.
    • -
    • Acca Certus can help you grow your network and opportunities. By using Acca Certus, you can join a network of professionals and experts in the field of architecture, engineering and construction software. You can also access new opportunities and markets for your projects and services.
    • -
    -

    Conclusion

    -

    Acca Certus is a powerful and professional software that can help you with software sicurezza cantieri, computo metrico e contabilit lavori, construction time-scheduling and accounting. However, using a crack version of Acca Certus, such as the Acca Certus Crack Trial 45, is not a good idea. It is illegal, unsafe, unreliable, and unethical. Instead, you should use the free trial version of Acca Certus for 45 days, buy the full version of Acca Certus with a discount, or use other software that are similar to Acca Certus. If you want to use Acca Certus effectively, you should read the user manual and watch the video tutorials, use the templates and wizards, use the integrated modules and features, update the software regularly, and contact the support team if you have any questions or issues. If you want to enjoy the benefits of Acca Certus, you should use it to save time and money, improve your quality and accuracy, expand your skills and knowledge, and grow your network and opportunities.

    -

    How to Choose the Right Version of Acca Certus

    -

    Acca Certus is a software that comes in different versions and editions, depending on your needs and preferences. Here are some of the factors that you should consider when choosing the right version of Acca Certus:

    -
      -
    • The type and size of your projects. Acca Certus offers different versions and editions for different types and sizes of projects, such as residential, commercial, industrial, or public. You should choose the version and edition that best suits your project's characteristics and requirements.
    • -
    • The features and modules that you need. Acca Certus offers different features and modules for different aspects of your projects, such as design, analysis, calculation, verification, documentation, printing, and sharing. You should choose the version and edition that includes the features and modules that you need and use most frequently.
    • -
    • The budget that you have. Acca Certus offers different prices and payment options for different versions and editions of the software. You should choose the version and edition that fits your budget and payment preferences.
    • -
    • The support and updates that you want. Acca Certus offers different levels of support and updates for different versions and editions of the software. You should choose the version and edition that provides you with the support and updates that you want and expect.
    • -
    -

    How to Get Started with Acca Certus

    -

    If you have chosen the right version of Acca Certus for your projects, you might be wondering how to get started with the software. Here are some steps that you should follow to get started with Acca Certus:

    -
      -
    1. Download and install Acca Certus on your computer. You can download Acca Certus from the ACCA Software website or from the link that you receive by email after purchasing the software. You can install Acca Certus by following the instructions on the screen.
    2. -
    3. Activate Acca Certus with your license key. You can activate Acca Certus by entering your license key in the software or in the ACCA Software website. You can find your license key in the email that you receive after purchasing the software or in your ACCA Software account.
    4. -
    5. Create your first project with Acca Certus. You can create your first project with Acca Certus by using a template or a wizard, or by starting from scratch. You can enter the basic information of your project, such as the name, the location, the type, and the size.
    6. -
    7. Add and edit the elements of your project with Acca Certus. You can add and edit the elements of your project with Acca Certus by using the integrated modules and features of the software. You can design, analyze, calculate, verify, document, print, and share your project with Acca Certus.
    8. -
    9. Save and export your project with Acca Certus. You can save and export your project with Acca Certus by using the file formats and options that are compatible with your needs and preferences. You can save and export your project as a PDF, a DWG, a DXF, a DOCX, an XLSX, or other formats.
    10. -
    -

    Conclusion

    -

    Acca Certus is a powerful and professional software that can help you with software sicurezza cantieri, computo metrico e contabilit lavori, construction time-scheduling and accounting. However, using a crack version of Acca Certus, such as the Acca Certus Crack Trial 45, is not a good idea. It is illegal, unsafe, unreliable, and unethical. Instead, you should use the free trial version of Acca Certus for 45 days, buy the full version of Acca Certus with a discount, or use other software that are similar to Acca Certus. If you want to use Acca Certus effectively, you should read the user manual and watch the video tutorials, use the templates and wizards, use the integrated modules and features, update the software regularly, and contact the support team if you have any questions or issues. If you want to enjoy the benefits of Acca Certus, you should use it to save time and money, improve your quality and accuracy, expand your skills and knowledge, and grow your network and opportunities. If you want to choose the right version of Acca Certus for your projects, you should consider the type and size of your projects, the features and modules that you need, the budget that you have, and the support and updates that you want. If you want to get started with Acca Certus for your projects

    -
    -
    \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (rockstar 1080p Bluray Movie Download VERIFIED).md b/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (rockstar 1080p Bluray Movie Download VERIFIED).md deleted file mode 100644 index 3be02e3c2b628d5bc27b05f7f884f367dd847892..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/HD Online Player (rockstar 1080p Bluray Movie Download VERIFIED).md +++ /dev/null @@ -1,6 +0,0 @@ -

    HD Online Player (rockstar 1080p bluray movie download)


    Download ===== https://geags.com/2uCrHi



    -
    -I usually buy CDs, rip them, and then leave them taking dust. ... MP3 Player Lossless HiFi Music Touch Tone Metal DSD WAV AIFF WMA ALAC ... Ghost Recon Alpha 2012 1080p BluRay FLAC2 0 x264-HiFi Language: English 2. ... 18+ Movies , Watch 18+ Full Movie Online Free Now, 18+ Movies Online ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Libro El Agua De La Vida John Armstrong Pdfl.md b/spaces/quidiaMuxgu/Expedit-SAM/Libro El Agua De La Vida John Armstrong Pdfl.md deleted file mode 100644 index 3f06c7764123fdc1a11a792e86bac3e43e72fda4..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Libro El Agua De La Vida John Armstrong Pdfl.md +++ /dev/null @@ -1,7 +0,0 @@ -

    Libro El Agua De La Vida John Armstrong Pdfl


Download File: https://geags.com/2uCqGS



    -
    -[PORTABLE] Libro El Agua De La Vida John Armstrong Pdf Download book el agua de la vida john armstrong pdf John Armstrong Pdf Libro El Agua De La Vida John Armstrong Libro El Agua De La Vida John Armstrong Libro El Agua De La Vida John Armstrong Libro El Agua De La Vida John Armstrong Pdf Libro El Agua De La Vida John Armstrong -The Penguin Dictionary Of English Literature - download free pdf, djvu, buy the book The Penguin Dictionary Of English Literature - download free pdf, djvu, buy book 8a78ff9644
    -
    -
    -

    diff --git a/spaces/radames/Real-Time-Latent-Consistency-Model-Text-To-Image/canny_gpu.py b/spaces/radames/Real-Time-Latent-Consistency-Model-Text-To-Image/canny_gpu.py deleted file mode 100644 index be6c2f75ef6554a0122f4ebd96301080a8e24303..0000000000000000000000000000000000000000 --- a/spaces/radames/Real-Time-Latent-Consistency-Model-Text-To-Image/canny_gpu.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -import torch.nn as nn -from torchvision.transforms import ToTensor, ToPILImage -from PIL import Image - -class SobelOperator(nn.Module): - def __init__(self, device="cuda"): - super(SobelOperator, self).__init__() - self.device = device - self.edge_conv_x = nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False).to( - self.device - ) - self.edge_conv_y = nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False).to( - self.device - ) - - sobel_kernel_x = torch.tensor( - [[-1.0, 0.0, 1.0], [-2.0, 0.0, 2.0], [-1.0, 0.0, 1.0]], device=self.device - ) - sobel_kernel_y = torch.tensor( - [[-1.0, -2.0, -1.0], [0.0, 0.0, 0.0], [1.0, 2.0, 1.0]], device=self.device - ) - - self.edge_conv_x.weight = nn.Parameter(sobel_kernel_x.view((1, 1, 3, 3))) - self.edge_conv_y.weight = nn.Parameter(sobel_kernel_y.view((1, 1, 3, 3))) - - @torch.no_grad() - def forward(self, image: Image.Image, low_threshold: float, high_threshold: float): - # Convert PIL image to PyTorch tensor - image_gray = image.convert("L") - image_tensor = ToTensor()(image_gray).unsqueeze(0).to(self.device) - - # Compute gradients - edge_x = self.edge_conv_x(image_tensor) - edge_y = self.edge_conv_y(image_tensor) - edge = torch.sqrt(edge_x**2 + edge_y**2) - - # Apply thresholding - edge = edge / edge.max() # Normalize to 0-1 - edge[edge >= high_threshold] = 1.0 - edge[edge <= low_threshold] = 0.0 - - # Convert the result back to a PIL image - return ToPILImage()(edge.squeeze(0).cpu()) diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/datasets/inference_dataset.py b/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/datasets/inference_dataset.py deleted file mode 100644 index de457349b0726932176f21814c61e34f15955bb7..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/datasets/inference_dataset.py +++ /dev/null @@ -1,22 +0,0 @@ -from torch.utils.data import Dataset -from PIL import Image -from utils import data_utils - - -class InferenceDataset(Dataset): - - def __init__(self, root, opts, transform=None): - self.paths = sorted(data_utils.make_dataset(root)) - self.transform = transform - self.opts = opts - - def __len__(self): - return len(self.paths) - - def __getitem__(self, index): - from_path = self.paths[index] - from_im = Image.open(from_path) - from_im = from_im.convert('RGB') if self.opts.label_nc == 0 else from_im.convert('L') - if self.transform: - from_im = self.transform(from_im) - return from_im diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/predict.py b/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/predict.py deleted file mode 100644 index b56972c3a60f424cd7e5ae6151af6940d2a96671..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/interface/pixel2style2pixel/predict.py +++ /dev/null @@ -1,187 +0,0 @@ -import os -import tempfile -from cog import BasePredictor, Input, Path -import shutil -from argparse import Namespace -import time -import sys -import 
pprint -import numpy as np -from PIL import Image -import torch -import torchvision.transforms as transforms -import dlib - -sys.path.append(".") -sys.path.append("..") - -from datasets import augmentations -from utils.common import tensor2im, log_input_image -from models.psp import pSp -from scripts.align_all_parallel import align_face - - -class Predictor(BasePredictor): - def setup(self): - self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat") - model_paths = { - "ffhq_frontalize": "pretrained_models/psp_ffhq_frontalization.pt", - "celebs_sketch_to_face": "pretrained_models/psp_celebs_sketch_to_face.pt", - "celebs_super_resolution": "pretrained_models/psp_celebs_super_resolution.pt", - "toonify": "pretrained_models/psp_ffhq_toonify.pt", - } - - loaded_models = {} - for key, value in model_paths.items(): - loaded_models[key] = torch.load(value, map_location="cpu") - - self.opts = {} - for key, value in loaded_models.items(): - self.opts[key] = value["opts"] - - for key in self.opts.keys(): - self.opts[key]["checkpoint_path"] = model_paths[key] - if "learn_in_w" not in self.opts[key]: - self.opts[key]["learn_in_w"] = False - if "output_size" not in self.opts[key]: - self.opts[key]["output_size"] = 1024 - - self.transforms = {} - for key in model_paths.keys(): - if key in ["ffhq_frontalize", "toonify"]: - self.transforms[key] = transforms.Compose( - [ - transforms.Resize((256, 256)), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - elif key == "celebs_sketch_to_face": - self.transforms[key] = transforms.Compose( - [transforms.Resize((256, 256)), transforms.ToTensor()] - ) - elif key == "celebs_super_resolution": - self.transforms[key] = transforms.Compose( - [ - transforms.Resize((256, 256)), - augmentations.BilinearResize(factors=[16]), - transforms.Resize((256, 256)), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - - def predict( - self, - image: Path = Input(description="input image"), - model: str = Input( - choices=[ - "celebs_sketch_to_face", - "ffhq_frontalize", - "celebs_super_resolution", - "toonify", - ], - description="choose model type", - ), - ) -> Path: - opts = self.opts[model] - opts = Namespace(**opts) - pprint.pprint(opts) - - net = pSp(opts) - net.eval() - net.cuda() - print("Model successfully loaded!") - - original_image = Image.open(str(image)) - if opts.label_nc == 0: - original_image = original_image.convert("RGB") - else: - original_image = original_image.convert("L") - original_image.resize( - (self.opts[model]["output_size"], self.opts[model]["output_size"]) - ) - - # Align Image - if model not in ["celebs_sketch_to_face", "celebs_seg_to_face"]: - input_image = self.run_alignment(str(image)) - else: - input_image = original_image - - img_transforms = self.transforms[model] - transformed_image = img_transforms(input_image) - - if model in ["celebs_sketch_to_face", "celebs_seg_to_face"]: - latent_mask = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17] - else: - latent_mask = None - - with torch.no_grad(): - result_image = run_on_batch( - transformed_image.unsqueeze(0), net, latent_mask - )[0] - input_vis_image = log_input_image(transformed_image, opts) - output_image = tensor2im(result_image) - - if model == "celebs_super_resolution": - res = np.concatenate( - [ - np.array( - input_vis_image.resize( - ( - self.opts[model]["output_size"], - self.opts[model]["output_size"], - ) - ) - ), - np.array( - output_image.resize( - ( - self.opts[model]["output_size"], - 
self.opts[model]["output_size"], - ) - ) - ), - ], - axis=1, - ) - else: - res = np.array( - output_image.resize( - (self.opts[model]["output_size"], self.opts[model]["output_size"]) - ) - ) - - out_path = Path(tempfile.mkdtemp()) / "out.png" - Image.fromarray(np.array(res)).save(str(out_path)) - return out_path - - def run_alignment(self, image_path): - aligned_image = align_face(filepath=image_path, predictor=self.predictor) - print("Aligned image has shape: {}".format(aligned_image.size)) - return aligned_image - - -def run_on_batch(inputs, net, latent_mask=None): - if latent_mask is None: - result_batch = net(inputs.to("cuda").float(), randomize_noise=False) - else: - result_batch = [] - for image_idx, input_image in enumerate(inputs): - # get latent vector to inject into our input image - vec_to_inject = np.random.randn(1, 512).astype("float32") - _, latent_to_inject = net( - torch.from_numpy(vec_to_inject).to("cuda"), - input_code=True, - return_latents=True, - ) - # get output image with injected style vector - res = net( - input_image.unsqueeze(0).to("cuda").float(), - latent_mask=latent_mask, - inject_latent=latent_to_inject, - resize=False, - ) - result_batch.append(res) - result_batch = torch.cat(result_batch, dim=0) - return result_batch diff --git a/spaces/radames/transformers-js-sveltekit-static-example-app/_app/immutable/nodes/0.b7b3585a.js b/spaces/radames/transformers-js-sveltekit-static-example-app/_app/immutable/nodes/0.b7b3585a.js deleted file mode 100644 index 816ec9c200b8b4bae249cbd0b42e8be69591d2d2..0000000000000000000000000000000000000000 --- a/spaces/radames/transformers-js-sveltekit-static-example-app/_app/immutable/nodes/0.b7b3585a.js +++ /dev/null @@ -1 +0,0 @@ -import{s as l,c as r,u as i,g as u,d as _}from"../chunks/scheduler.e108d1fd.js";import{S as c,i as f,d as p,t as d}from"../chunks/index.7e6319f2.js";const m=!0,g="always",h=Object.freeze(Object.defineProperty({__proto__:null,prerender:m,trailingSlash:g},Symbol.toStringTag,{value:"Module"}));function $(n){let s;const a=n[1].default,e=r(a,n,n[0],null);return{c(){e&&e.c()},l(t){e&&e.l(t)},m(t,o){e&&e.m(t,o),s=!0},p(t,[o]){e&&e.p&&(!s||o&1)&&i(e,a,t,t[0],s?_(a,t[0],o,null):u(t[0]),null)},i(t){s||(p(e,t),s=!0)},o(t){d(e,t),s=!1},d(t){e&&e.d(t)}}}function y(n,s,a){let{$$slots:e={},$$scope:t}=s;return n.$$set=o=>{"$$scope"in o&&a(0,t=o.$$scope)},[t,e]}class v extends c{constructor(s){super(),f(this,s,y,$,l,{})}}export{v as component,h as universal}; diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Evoscan 2.7 Serial.rar A Step-by-Step Guide to Unlocking Evoscan Features.md b/spaces/raedeXanto/academic-chatgpt-beta/Evoscan 2.7 Serial.rar A Step-by-Step Guide to Unlocking Evoscan Features.md deleted file mode 100644 index ede3b1f38d064978de5fb0ee0a582edc6bd42219..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Evoscan 2.7 Serial.rar A Step-by-Step Guide to Unlocking Evoscan Features.md +++ /dev/null @@ -1,178 +0,0 @@ - -

    Evoscan 2.7 Serial.rar: How to Crack Evoscan Software

    -

    If you are a car enthusiast who wants to tune and monitor your Mitsubishi, Subaru, or Mazda vehicle, you might have heard of Evoscan software. Evoscan is a powerful tool that allows you to connect your laptop to your car's ECU (engine control unit) and access a wealth of data and settings. However, Evoscan is not free, and you need a license key to activate it. That's why some people resort to using Evoscan 2.7 Serial.rar, a crack tool that claims to generate a valid serial number for Evoscan software. But is it safe and legal to use? And are there any alternatives to it? In this article, we will answer these questions and more.

    -

    Evoscan 2.7 Serial.rar


    DOWNLOAD ——— https://tinourl.com/2uL1PU



    -

    What is Evoscan and why do you need it?

    -

    Evoscan is a software tool for tuning and logging data from Mitsubishi, Subaru, and Mazda vehicles. It works with a variety of OBD-II (on-board diagnostics) cables and adapters that connect your laptop to your car's ECU. With Evoscan, you can:

    -
      -
    • Read and clear diagnostic trouble codes (DTCs) that indicate problems with your engine, transmission, brakes, airbags, etc.
    • -
    • View real-time data from various sensors and gauges, such as engine speed, coolant temperature, air/fuel ratio, boost pressure, etc.
    • -
    • Record data logs for later analysis or comparison.
    • -
    • Adjust ECU settings such as fuel maps, ignition timing, boost control, etc.
    • -
    • Flash new ECU firmware or ROM files.
    • -
    • Test various ECU functions such as fans, injectors, solenoids, etc.
    • -
    -

    Evoscan can help you improve your car's performance, fuel economy, reliability, and emissions. It can also help you diagnose and fix problems with your car before they become serious or costly. Evoscan is especially useful for modifying or tuning your car for racing or custom purposes.
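
Evoscan itself is a closed-source Windows application, but the generic OBD-II side of what it does (polling live sensor values and reading diagnostic trouble codes) can be illustrated with the open-source python-OBD library. The sketch below is only an illustration of that general approach, not Evoscan's own code; the adapter port, supported commands, and returned values all depend on your cable and vehicle.

```python
# Illustrative sketch using the open-source python-OBD library (pip install obd).
# This is not Evoscan; it only shows the kind of OBD-II polling such tools perform.
import obd

# Auto-detect a connected OBD-II adapter; on Windows you can also pass an
# explicit port, e.g. obd.OBD("COM3").
connection = obd.OBD()

# Read a few live values, similar to Evoscan's real-time gauges.
for command in (obd.commands.RPM, obd.commands.COOLANT_TEMP, obd.commands.SPEED):
    response = connection.query(command)
    if not response.is_null():
        print(f"{command.name}: {response.value}")

# Read any stored diagnostic trouble codes (DTCs).
dtc_response = connection.query(obd.commands.GET_DTC)
print("Stored trouble codes:", dtc_response.value)
```

Flashing ROMs, editing fuel or ignition maps, and the other ECU-level features listed below go through manufacturer-specific protocols rather than this generic OBD-II layer, which is the part tools like Evoscan add on top.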

    -

    Evoscan features and benefits

    -

    Some of the features and benefits of Evoscan are:

    -
      -
    • It supports a wide range of Mitsubishi vehicles from 1994 to 2015, including Lancer Evolution, Eclipse, Galant, 3000GT, Montero, Outlander, etc.
    • -
    • It also supports some Subaru vehicles from 1999 to 2010, such as Impreza WRX/STI, Forester XT/STI, Legacy GT/Spec B/Outback XT.
    • -
    • It also supports some Mazda vehicles from 2005 to 2010, such as MazdaSpeed 3/6/CX-7/RX-8.
    • -
    • It works with various OBD-II cables and adapters that use FTDI or Prolific chipsets.
    • -
    • It has a user-friendly interface that displays data in graphs, gauges, tables, or text formats.
    • -
    • It has a built-in map editor that allows you to modify ECU maps easily.
    • -
    • It has a built-in ROM downloader that allows you to download ECU ROM files from the internet.
    • -
    • It has a built-in ROM patcher that allows you to apply patches or mods to ECU ROM files.
    • -
• It has a built-in ROM comparer that allows you to compare two ECU ROM files side by side (a minimal sketch of this idea follows the list).
    • -
    • It has a built-in ROM analyzer that allows you to analyze ECU ROM files for errors or anomalies.
    • -
    -
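
The ROM comparer and analyzer described above boil down to byte-level inspection of two ECU images. As a rough sketch of that idea (not Evoscan's actual implementation; the file names are placeholders), a minimal comparison could look like this:

```python
# Minimal sketch of a byte-level ROM comparison, the basic idea behind a "ROM comparer".
# File names are placeholders; this is not Evoscan's own code.
def compare_roms(path_a: str, path_b: str, limit: int = 20) -> None:
    with open(path_a, "rb") as fa, open(path_b, "rb") as fb:
        rom_a, rom_b = fa.read(), fb.read()

    if len(rom_a) != len(rom_b):
        print(f"Size differs: {len(rom_a)} vs {len(rom_b)} bytes")

    shown = 0
    for offset, (byte_a, byte_b) in enumerate(zip(rom_a, rom_b)):
        if byte_a != byte_b:
            print(f"0x{offset:06X}: 0x{byte_a:02X} -> 0x{byte_b:02X}")
            shown += 1
            if shown >= limit:
                print("... (further differences not shown)")
                break

compare_roms("stock_rom.bin", "modified_rom.bin")
```

A real tool of this kind maps the changed offsets back to named ECU tables (fuel, timing, boost) using a ROM definition file, which is where the map editor and analyzer features come in.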

    Evoscan compatibility and requirements

    -

    To use Evoscan software, you need:

    -

    Evoscan 2.7 crack download
    -Evoscan 2.7 keygen free
    -Evoscan 2.7 license code generator
    -Evoscan 2.7 activation key rar
    -Evoscan 2.7 full version zip
    -Evoscan 2.7 serial number online
    -Evoscan 2.7 registration code torrent
    -Evoscan 2.7 patch file
    -Evoscan 2.7 product key finder
    -Evoscan 2.7 software update
    -Evoscan 2.7 latest version mega
    -Evoscan 2.7 premium edition mediafire
    -Evoscan 2.7 pro crack serial
    -Evoscan 2.7 unlock code free
    -Evoscan 2.7 working keygen rar
    -Evoscan 2.7 cracked version download
    -Evoscan 2.7 license key generator online
    -Evoscan 2.7 activation code zip
    -Evoscan 2.7 full version with crack torrent
    -Evoscan 2.7 serial number generator free
    -Evoscan 2.7 registration code online
    -Evoscan 2.7 patch download mega
    -Evoscan 2.7 product key online
    -Evoscan 2.7 software crack zip
    -Evoscan 2.7 latest version with keygen mediafire
    -Evoscan 2.7 premium edition crack serial
    -Evoscan 2.7 pro license code free
    -Evoscan 2.7 unlock code generator rar
    -Evoscan 2.7 working crack download
    -Evoscan 2.7 cracked version free online
    -Evoscan 2.7 license key online zip
    -Evoscan 2.7 activation code torrent rar
    -Evoscan 2.7 full version with keygen download
    -Evoscan 2.7 serial number online free
    -Evoscan 2.7 registration code generator torrent
    -Evoscan 2.7 patch file mega zip
    -Evoscan 2.7 product key finder online
    -Evoscan 2.7 software update crack rar
    -Evoscan 2.7 latest version with crack mediafire zip
    -Evoscan 2.7 premium edition keygen serial free
    -Evoscan 2.7 pro activation code rar zip
    -Evoscan 2.7 unlock code online free torrent
    -Evoscan 2.7 working keygen download mega rar
    -Evoscan 2.7 cracked version with patch online
    -Evoscan 2.7 license key generator torrent zip
    -Evoscan 2.7 activation code online rar
    -Evoscan 2.7 full version with patch download
    -Evoscan 2.7 serial number generator torrent free
    -Evoscan 2.7 registration code online zip
    -Evoscan 2.7 patch file mediafire rar

    -
      -
    • A Windows PC or laptop running Windows XP/Vista/7/8/10.
    • -
    • An OBD-II cable or adapter that connects your PC or laptop to your car's OBD-II port.
    • -
    • A license key that activates the software.
    • -
    -

    The license key costs $25 USD and can be purchased from the official website of Evoscan: https://www.evoscan.com/. The license key is valid for one PC or laptop only and can be transferred to another PC or laptop if needed. The license key also entitles you to free updates and support for the software.

    -

    What is Evoscan 2.7 Serial.rar and how does it work?

    -

    Evoscan 2.7 Serial.rar is a crack tool that claims to generate a valid serial number for Evoscan software. It is a small file that can be downloaded from various websites or file-sharing platforms. It is usually compressed in a RAR format that requires a program like WinRAR or 7-Zip to extract it. The extracted file contains an executable file named "Evoscan 2.7 Serial.exe" that runs the crack tool.

    -

    The crack tool works by scanning the registry key settings of your PC or laptop for the serial number of Evoscan software. If it finds one, it displays it on the screen. If it does not find one, it generates a random serial number that looks like this: "EVOSCAN-XXXX-XXXX-XXXX-XXXX". You can then copy this serial number and paste it into the activation window of Evoscan software. The crack tool claims that this serial number will activate the software permanently without any limitations or restrictions.

    -

    Evoscan 2.7 Serial.rar overview and download link

    -

    The following table summarizes some information about Evoscan 2.7 Serial.rar:

    - - - - - - - - - -
| Name | Description |
| --- | --- |
| Type | A crack tool for generating serial numbers for Evoscan software |
| Size | About 1 MB |
| Format | RAR |
| Date | About 2014 |
| Source | Various websites or file-sharing platforms |
| Status | Potentially unsafe or illegal |
| Download link | |
    -

    How to use Evoscan 2.7 Serial.rar to crack Evoscan software

    -

    The following steps describe how to use Evoscan 2.7 Serial.rar to crack Evoscan software:

    -
      -
    1. Download Evoscan 2.7 Serial.rar from the download link provided above or from another source.
    2. -
    3. Extract the RAR file using a program like WinRAR or 7-Zip.
    4. -
    5. Run the executable file named "Evoscan 2.7 Serial.exe" as administrator.
    6. -
    7. If the crack tool finds an existing serial number for Evoscan software on your PC or laptop, it will display it on the screen. You can copy this serial number and use it to activate the software.
    8. -
    9. If the crack tool does not find an existing serial number for Evoscan software on your PC or laptop, it will generate a random serial number that looks like this: "EVOSCAN-XXXX-XXXX-XXXX-XXXX". You can copy this serial number and use it to activate the software.
    10. -
    11. To activate the software, open Evoscan software on your PC or laptop and click on the "Activate" button on the top right corner of the window.
    12. -
    13. Paste the serial number into the activation window and click on the "Activate" button again.
    14. -
15. If the activation is successful, you will see a message saying "Activation Successful". You can then close the activation window and enjoy using the software without any limitations or restrictions.

      Risks and precautions of using Evoscan 2.7 Serial.rar

      -

      Using Evoscan 2.7 Serial.rar to crack Evoscan software may seem like a convenient and cost-effective way to access the software, but it also comes with many risks and drawbacks. Some of these are:

      -
        -
      • It is illegal and unethical. Using cracked software violates the software license agreement and the intellectual property rights of the software developer. It also deprives them of the revenue they deserve for their hard work and innovation. You could face legal consequences such as fines or imprisonment if you are caught using or distributing cracked software .
      • -
      • It is unsafe and unreliable. Cracked software may contain viruses, malware, spyware, or other harmful programs that can damage your PC or laptop, steal your personal information, or compromise your online security . Cracked software may also have bugs, errors, or missing features that can affect its functionality or performance . Cracked software may also stop working after a certain period or after an update.
      • -
      • It is unsupported and outdated. Cracked software does not receive any updates or support from the software developer. This means that you will not be able to access new features, improvements, bug fixes, or security patches that are available for the legitimate version of the software . This can make your software vulnerable to hackers, incompatible with other programs or devices, or obsolete over time .
      • -
      -

      To avoid these risks and drawbacks, you should not use Evoscan 2.7 Serial.rar to crack Evoscan software. Instead, you should purchase a legitimate license key from the official website of Evoscan or look for other alternatives to Evoscan software.

      -

      Alternatives to Evoscan 2.7 Serial.rar

      -

      If you want to use Evoscan software without breaking the law or risking your PC or laptop's safety, you have two main options:

      -

      Official Evoscan license and support

      -

      The best option is to buy an official license key from the official website of Evoscan: https://www.evoscan.com/. The license key costs $25 USD and can be purchased using PayPal or credit card. The license key is valid for one PC or laptop only and can be transferred to another PC or laptop if needed. The license key also entitles you to free updates and support for the software.

      -

      By buying an official license key, you will be able to enjoy all the features and benefits of Evoscan software without any limitations or restrictions. You will also be able to access new features, improvements, bug fixes, and security patches that are released by the software developer. You will also be able to contact the software developer for any questions or issues that you may encounter while using the software.

      -

      By buying an official license key, you will also be supporting the software developer and their efforts to create and maintain high-quality software for car enthusiasts like you. You will also be respecting their intellectual property rights and complying with the law.

      -

      Other software tools for tuning and logging data

      -

      If you cannot afford or do not want to buy an official license key for Evoscan software, you can look for other software tools that offer similar functions for tuning and logging data from your car's ECU. Some of these tools are:

      -
        -
      • Tactrix Openport 2.0: This is a hardware device that connects your PC or laptop to your car's OBD-II port using a USB cable. It works with various software applications such as EcuFlash, RomRaider, EcuExplorer, etc. It supports Mitsubishi, Subaru, Mazda, Nissan, Toyota, etc.
      • -
      • OBDwiz: This is a software application that works with any OBD-II compliant scan tool or interface device. It allows you to read and clear DTCs, view real-time data, record data logs, etc. It supports all OBD-II protocols and vehicles.
      • -
• Kiwi 4: This is a wireless device that connects your smartphone to your car's OBD-II port using Bluetooth. It works with various smartphone apps such as Torque Pro, DashCommand, Carista, etc. It allows you to read and clear DTCs, view real-time sensor data, and record data logs directly from your phone.

        Legal and ethical issues of using cracked software

        -

        Using cracked software is not only risky and unreliable, but also illegal and unethical. By using cracked software, you are violating the software license agreement and the intellectual property rights of the software developer. You are also depriving them of the revenue they deserve for their hard work and innovation. You could face legal consequences such as fines or imprisonment if you are caught using or distributing cracked software .

        -

        Using cracked software is also unethical because it is unfair to the software developer and to other users who pay for the software. You are taking advantage of someone else's work without giving them any credit or compensation. You are also undermining the quality and security of the software industry by encouraging piracy and discouraging innovation. You are also harming yourself by exposing your PC or laptop to malware and other threats that could compromise your data or identity.

        -

        Therefore, you should not use cracked software for any reason. You should respect the software developer and their intellectual property rights. You should also respect yourself and your PC or laptop by using legitimate and safe software.

        -

        Conclusion

        -

        In this article, we have discussed what Evoscan software is and why you might need it. We have also explained what Evoscan 2.7 Serial.rar is and how it works. We have also highlighted the risks and drawbacks of using Evoscan 2.7 Serial.rar to crack Evoscan software. We have also suggested some alternatives to Evoscan 2.7 Serial.rar that are legal and safe.

        -

        Summary of the main points

        -
          -
        • Evoscan is a software tool for tuning and logging data from Mitsubishi, Subaru, and Mazda vehicles.
        • -
        • Evoscan 2.7 Serial.rar is a crack tool that claims to generate a valid serial number for Evoscan software.
        • -
        • Using Evoscan 2.7 Serial.rar to crack Evoscan software is illegal, unethical, unsafe, unreliable, unsupported, and outdated.
        • -
        • You can buy an official license key for Evoscan software from the official website of Evoscan or look for other software tools that offer similar functions for tuning and logging data from your car's ECU.
        • -
        -

        Call to action and recommendations

        -

        If you want to use Evoscan software without breaking the law or risking your PC or laptop's safety, we recommend that you:

        -
          -
        1. Purchase an official license key for Evoscan software from the official website of Evoscan: https://www.evoscan.com/.
        2. -
        3. Enjoy all the features and benefits of Evoscan software without any limitations or restrictions.
        4. -
        5. Access new features, improvements, bug fixes, and security patches that are released by the software developer.
        6. -
        7. Contact the software developer for any questions or issues that you may encounter while using the software.
        8. -
        9. Support the software developer and their efforts to create and maintain high-quality software for car enthusiasts like you.
        10. -
        11. Respect their intellectual property rights and comply with the law.
        12. -
        -

        If you cannot afford or do not want to buy an official license key for Evoscan software, we recommend that you:

        -
          -
        1. Look for other software tools that offer similar functions for tuning and logging data from your car's ECU.
        2. -
        3. Compare their features, benefits, compatibility, requirements, prices, etc.
        4. -
        5. Choose the one that best suits your needs and budget.
        6. -
        7. Purchase a legitimate license key for the chosen software tool from its official website or authorized dealer.
        8. -
        9. Avoid using cracked software for any reason.
        10. -
        -

        We hope this article has been helpful and informative for you. Thank you for reading!

FAQs
          -
        1. What is Evoscan?
          -Evoscan is a software tool for tuning and logging data from Mitsubishi, Subaru, and Mazda vehicles.
        2. -
        3. What is Evoscan 2.7 Serial.rar?
          -Evoscan 2.7 Serial.rar is a crack tool that claims to generate a valid serial number for Evoscan software.
        4. -
        5. What are the risks of using Evoscan 2.7 Serial.rar?
          -Using Evoscan 2.7 Serial.rar to crack Evoscan software is illegal, unethical, unsafe, unreliable, unsupported, and outdated.
        6. -
        7. What are the alternatives to Evoscan 2.7 Serial.rar?
          -You can buy an official license key for Evoscan software from the official website of Evoscan or look for other software tools that offer similar functions for tuning and logging data from your car's ECU.
        8. -
        9. How can I contact the developer of Evoscan?
          -You can contact the developer of Evoscan by email at support@evoscan.com.
        10. -
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Game Of Thrones S03e04 720p Hdtv X264evolve 17 The Unsullied Join Daenerys in Her Quest for the Iron Throne.md b/spaces/raedeXanto/academic-chatgpt-beta/Game Of Thrones S03e04 720p Hdtv X264evolve 17 The Unsullied Join Daenerys in Her Quest for the Iron Throne.md deleted file mode 100644 index b7d2af9b7d1de43b1f9da3d4525a0185d359ade1..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Game Of Thrones S03e04 720p Hdtv X264evolve 17 The Unsullied Join Daenerys in Her Quest for the Iron Throne.md +++ /dev/null @@ -1,63 +0,0 @@ - -

        Game of Thrones S03E04: And Now His Watch Is Ended

        -

        Game of Thrones is a fantasy drama series based on the novels by George R.R. Martin. It follows the lives and struggles of various noble families in the fictional continent of Westeros, where seasons can last for years. In this episode, titled "And Now His Watch Is Ended", we witness some of the most shocking and thrilling moments of the third season, as characters face betrayal, death, revenge, and liberation.

        -

        The Night's Watch at Craster's Keep

        -

        The episode begins with a grim scene at Craster's Keep, where the Night's Watch, a military order that guards the Wall against threats from beyond, is taking shelter after a disastrous battle with the White Walkers, undead creatures that can raise the dead. Craster, a wildling who lives with his daughter-wives, is a cruel host who demands food and wine from his guests. He also sacrifices his newborn sons to the White Walkers, a practice that horrifies some of the Night's Watchmen.

        -

        Game Of Thrones S03e04 720p Hdtv X264evolve 17


        DOWNLOAD 🔗 https://tinourl.com/2uL1Vg



        -

        One of them, Karl Tanner, confronts Craster and insults him, leading to a violent brawl. In the chaos, Craster is killed by Tanner, and Lord Commander Jeor Mormont is stabbed in the back by another mutineer. Samwell Tarly, a timid but loyal friend of Jon Snow, witnesses Mormont's death and flees with Gilly, one of Craster's daughters who has just given birth to a son. Sam vows to protect her and her baby from both Craster's men and the White Walkers.

        -

        Jaime and Brienne on the road

        -

        Meanwhile, Jaime Lannister, a captive of House Bolton, is suffering from the loss of his right hand, which was cut off by Locke, one of Roose Bolton's men, in the previous episode. Jaime is mocked and humiliated by his captors, who force him to wear his severed hand around his neck. He also develops an infection in his wound, which makes him weak and feverish.

        -

        Game of Thrones Season 3 Episode 4 subtitles
        -And Now His Watch Is Ended 720p HDTV download
        -Game of Thrones S03E04 EVOLVE torrent
        -Watch Game of Thrones S03E04 online free
        -Game of Thrones S03E04 recap and review
        -Game of Thrones S03E04 streaming HD quality
        -Game of Thrones S03E04 Daenerys Astapor scene
        -Game of Thrones S03E04 Varys revenge plot
        -Game of Thrones S03E04 Arya and the Brotherhood
        -Game of Thrones S03E04 Margaery and Joffrey
        -Game of Thrones S03E04 Theon flashback
        -Game of Thrones S03E04 Night's Watch mutiny
        -Game of Thrones S03E04 best quotes and dialogues
        -Game of Thrones S03E04 behind the scenes and trivia
        -Game of Thrones S03E04 soundtrack and music
        -Game of Thrones S03E04 analysis and theories
        -Game of Thrones S03E04 ratings and viewership
        -Game of Thrones S03E04 cast and crew interviews
        -Game of Thrones S03E04 fan reactions and memes
        -Game of Thrones S03E04 Easter eggs and references
        -Game of Thrones S03E04 differences from the books
        -Game of Thrones S03E04 predictions and spoilers
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles English[^1^]
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles English[^2^]
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Spanish
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Spanish
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles French
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles French
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles German
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles German
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Arabic
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Arabic
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Hindi
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Hindi
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Chinese
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Chinese
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Japanese
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Japanese
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Russian
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Russian
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Portuguese
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Portuguese
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Italian
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Italian
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Turkish
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Turkish
        -Game of Thrones S03E04 720p HDTV x264-EVOLVE subtitles Indonesian
        -And Now His Watch Is Ended HDTV x264-EVOLVE subtitles Indonesian

        -

        Brienne of Tarth, a female knight who was escorting Jaime to King's Landing before they were captured, tries to encourage him to fight for his life. She reminds him that he still has his family, his honor, and his other hand. She also calls him out for his self-pity and challenges him to use his anger as motivation. Jaime reluctantly accepts her advice and drinks some horse urine to clean his wound.

        -

        Theon's escape and recapture

        -

        Theon Greyjoy, who betrayed his adoptive family, the Starks, and seized their castle, Winterfell, in season two, is now a prisoner himself. He is tortured by an unknown man who flays his fingers and asks him riddles. Theon begs for mercy and confesses that he regrets killing two farm boys to pass them off as Bran and Rickon Stark, whom he failed to capture.

        -

        The torturer pretends to be sympathetic and promises to free Theon. He leads him to a secret tunnel where he says a horse is waiting for him. However, as Theon rides away,

        0a6ba089eb
        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Grand Theft Auto IV (GTA 4) Updated Multi 5 Repack Mr DJ Hack Online.md b/spaces/raedeXanto/academic-chatgpt-beta/Grand Theft Auto IV (GTA 4) Updated Multi 5 Repack Mr DJ Hack Online.md deleted file mode 100644 index 30ce0510a9f783135bddeb413e281c2d8797bbcf..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Grand Theft Auto IV (GTA 4) Updated Multi 5 Repack Mr DJ Hack Online.md +++ /dev/null @@ -1,15 +0,0 @@ - -

        How to Hack Grand Theft Auto IV (GTA 4) Updated Multi 5 Repack by Mr DJ

        -

        Grand Theft Auto IV (GTA 4) is a popular action-adventure game developed by Rockstar North and published by Rockstar Games. It is the eleventh title in the Grand Theft Auto series, and the first main entry since 2004's Grand Theft Auto: San Andreas. The game follows the story of Niko Bellic, a war veteran who comes to Liberty City in search of a new life and a mysterious past[^2^]. The game features an open world environment that allows players to explore, interact, and engage in various activities such as driving, shooting, fighting, and more.

        -

        Grand Theft Auto IV (GTA 4) updated Multi 5 repack Mr DJ hack online


        Download >>>>> https://tinourl.com/2uL129



        -

        If you want to enjoy the game with enhanced graphics, gameplay, and features, you might want to try the updated Multi 5 repack by Mr DJ. This is a modified version of the game that includes all the DLCs, patches, updates, and mods that improve the game's performance and quality. Some of the mods included are Definitive Mod Pack, ENB Series, HD Textures Pack, Realistic Car Pack, and more[^1^]. The repack also supports multiple languages such as English, French, German, Italian, Spanish, Russian, Polish, and Japanese[^1^]. The repack size is 13.3 GB and it can be installed easily by following the instructions provided by Mr DJ[^2^].

        -

        However, if you want to hack the game and get unlimited money, weapons, health, ammo, and other advantages, you might need some extra tools and tricks. Here are some of the ways you can hack Grand Theft Auto IV (GTA 4) updated Multi 5 repack by Mr DJ:

        -
          -
        • Use a trainer. A trainer is a program that runs in the background and modifies the game's memory to enable cheats. You can download a trainer from various websites such as GTA Gaming, GTA All, or GTA Inside. Some of the trainers have features such as god mode, infinite money, no wanted level, teleportation, super speed, super jump, and more. To use a trainer, you need to launch it before or after launching the game and press the corresponding keys to activate the cheats.
        • -
        • Use a cheat engine. A cheat engine is a program that allows you to scan and edit the game's memory values to change various aspects of the game. You can download a cheat engine from Cheat Engine or WeMod. To use a cheat engine, you need to launch it before or after launching the game and attach it to the game's process. Then you can search for the values you want to change, such as money, health, or ammo, and modify them accordingly.
        • -
        • Use a mod menu. A mod menu is a script that adds a graphical interface to the game that allows you to access various cheats and options. You can download a mod menu from GTA 5 Mods, LCPDFR, or Nexus Mods. Some of the mod menus have features such as spawn vehicles, weapons, peds, objects, weather control, time control, teleportation, gravity control, ragdoll physics, and more. To use a mod menu, you need to install it in your game folder and press a key or button to open it in-game.
        • -
        -

        These are some of the ways you can hack Grand Theft Auto IV (GTA 4) updated Multi 5 repack by Mr DJ. However, be aware that hacking may cause instability or crashes in your game or may get you banned from online multiplayer modes. Therefore, use these tools at your own risk, back up your save files first, and avoid using them in online modes.

        -

        -
        -
        \ No newline at end of file diff --git a/spaces/raseel-zymr/Document-QandA/app.py b/spaces/raseel-zymr/Document-QandA/app.py deleted file mode 100644 index 3e636a5f44503e1e83644315d3d3a6bfd03cc921..0000000000000000000000000000000000000000 --- a/spaces/raseel-zymr/Document-QandA/app.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -import streamlit as st -from pathlib import Path -from io import StringIO - -#for textfiles -from langchain.document_loaders import TextLoader -#text splitter -from langchain.text_splitter import CharacterTextSplitter -#for using HugginFace models & embeddings -from langchain.embeddings import HuggingFaceEmbeddings -from langchain import HuggingFaceHub -# Vectorstore: https://python.langchain.com/en/latest/modules/indexes/vectorstores.html -from langchain.vectorstores import FAISS -#facebook vectorization -from langchain.chains.question_answering import load_qa_chain -#load pdf -#vectorize db index with chromadb -from langchain.indexes import VectorstoreIndexCreator -from langchain.chains import RetrievalQA -from langchain.document_loaders import UnstructuredPDFLoader - -os.environ["HUGGINGFACEHUB_API_TOKEN"] = st.secrets["hf_api_key"] - -def init(): - global embeddings, llm, llm2, chain - # Embeddings - embeddings = HuggingFaceEmbeddings() - llm=HuggingFaceHub(repo_id="declare-lab/flan-alpaca-large", model_kwargs={"temperature":0, "max_length":512}) - chain = load_qa_chain(llm, chain_type="stuff") - -def pdf_file(txtFileObj): - st.subheader('Uploaded PDF File:') - st.write(txtFileObj.name) - - with open(txtFileObj.name, "wb") as f: - f.write(txtFileObj.getbuffer()) - - loaders = [UnstructuredPDFLoader(txtFileObj.name)] - index = VectorstoreIndexCreator( - embedding=embeddings, - text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)).from_loaders(loaders) - - chain = RetrievalQA.from_chain_type(llm=llm, - chain_type="stuff", - retriever=index.vectorstore.as_retriever(), - input_key="question") - - st.subheader('Enter query') - query = st.text_input('Ask anything about the Document you uploaded') - - if (query): - answer = chain.run(question=query) - - st.subheader('Answer') - st.write(answer) - -def text_file(txtFileObj): - st.subheader('Uploaded Text File:') - st.write(txtFileObj.name) - - #stringio = StringIO(txtFileObj.getvalue().decode("utf-8")) - with open(txtFileObj.name, "wb") as f: - f.write(txtFileObj.getbuffer()) - - loader = TextLoader(txtFileObj.name) - documents = loader.load() - - # Text Splitter - text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=10) - docs = text_splitter.split_documents(documents) - - db = FAISS.from_documents(docs, embeddings) - - st.subheader('Enter query') - query = st.text_input('Ask anything about the Document you uploaded') - - if (query): - docs = db.similarity_search(query) - answer = chain.run(input_documents=docs, question=query) - - st.subheader('Answer') - st.write(answer) - -st.title('Document Q&A - Ask anything in your Document') -st.subheader('This application can be used to upload text(.txt) and PDF(.pdf) files and ask questions about their contents.') - -init() - -st.sidebar.subheader('Upload document') -uploaded_file = st.sidebar.file_uploader("Upload File",type=['txt','pdf']) - -if uploaded_file and Path(uploaded_file.name).suffix == '.txt': - st.sidebar.info(Path(uploaded_file.name)) - text_file(uploaded_file) - -if uploaded_file and Path(uploaded_file.name).suffix == '.pdf': - pdf_file(uploaded_file) - -with st.sidebar.expander('File'): - if (uploaded_file): - 
st.info(uploaded_file.name) -if os.path.exists('/content/'): - st.info(os.listdir('/content/')) \ No newline at end of file diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/vm.d.ts b/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/vm.d.ts deleted file mode 100644 index c96513a50555debf6fd50aa0e414a18d1d342efb..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/@types/node/vm.d.ts +++ /dev/null @@ -1,509 +0,0 @@ -/** - * The `vm` module enables compiling and running code within V8 Virtual - * Machine contexts. - * - * **The `vm` module is not a security** - * **mechanism. Do not use it to run untrusted code.** - * - * JavaScript code can be compiled and run immediately or - * compiled, saved, and run later. - * - * A common use case is to run the code in a different V8 Context. This means - * invoked code has a different global object than the invoking code. - * - * One can provide the context by `contextifying` an - * object. The invoked code treats any property in the context like a - * global variable. Any changes to global variables caused by the invoked - * code are reflected in the context object. - * - * ```js - * const vm = require('vm'); - * - * const x = 1; - * - * const context = { x: 2 }; - * vm.createContext(context); // Contextify the object. - * - * const code = 'x += 40; var y = 17;'; - * // `x` and `y` are global variables in the context. - * // Initially, x has the value 2 because that is the value of context.x. - * vm.runInContext(code, context); - * - * console.log(context.x); // 42 - * console.log(context.y); // 17 - * - * console.log(x); // 1; y is not defined. - * ``` - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/vm.js) - */ -declare module 'vm' { - interface Context extends NodeJS.Dict {} - interface BaseOptions { - /** - * Specifies the filename used in stack traces produced by this script. - * Default: `''`. - */ - filename?: string | undefined; - /** - * Specifies the line number offset that is displayed in stack traces produced by this script. - * Default: `0`. - */ - lineOffset?: number | undefined; - /** - * Specifies the column number offset that is displayed in stack traces produced by this script. - * @default 0 - */ - columnOffset?: number | undefined; - } - interface ScriptOptions extends BaseOptions { - displayErrors?: boolean | undefined; - timeout?: number | undefined; - cachedData?: Buffer | undefined; - /** @deprecated in favor of `script.createCachedData()` */ - produceCachedData?: boolean | undefined; - } - interface RunningScriptOptions extends BaseOptions { - /** - * When `true`, if an `Error` occurs while compiling the `code`, the line of code causing the error is attached to the stack trace. - * Default: `true`. - */ - displayErrors?: boolean | undefined; - /** - * Specifies the number of milliseconds to execute code before terminating execution. - * If execution is terminated, an `Error` will be thrown. This value must be a strictly positive integer. - */ - timeout?: number | undefined; - /** - * If `true`, the execution will be terminated when `SIGINT` (Ctrl+C) is received. - * Existing handlers for the event that have been attached via `process.on('SIGINT')` will be disabled during script execution, but will continue to work after that. - * If execution is terminated, an `Error` will be thrown. - * Default: `false`. 
- */ - breakOnSigint?: boolean | undefined; - /** - * If set to `afterEvaluate`, microtasks will be run immediately after the script has run. - */ - microtaskMode?: 'afterEvaluate' | undefined; - } - interface CompileFunctionOptions extends BaseOptions { - /** - * Provides an optional data with V8's code cache data for the supplied source. - */ - cachedData?: Buffer | undefined; - /** - * Specifies whether to produce new cache data. - * Default: `false`, - */ - produceCachedData?: boolean | undefined; - /** - * The sandbox/context in which the said function should be compiled in. - */ - parsingContext?: Context | undefined; - /** - * An array containing a collection of context extensions (objects wrapping the current scope) to be applied while compiling - */ - contextExtensions?: Object[] | undefined; - } - interface CreateContextOptions { - /** - * Human-readable name of the newly created context. - * @default 'VM Context i' Where i is an ascending numerical index of the created context. - */ - name?: string | undefined; - /** - * Corresponds to the newly created context for display purposes. - * The origin should be formatted like a `URL`, but with only the scheme, host, and port (if necessary), - * like the value of the `url.origin` property of a URL object. - * Most notably, this string should omit the trailing slash, as that denotes a path. - * @default '' - */ - origin?: string | undefined; - codeGeneration?: - | { - /** - * If set to false any calls to eval or function constructors (Function, GeneratorFunction, etc) - * will throw an EvalError. - * @default true - */ - strings?: boolean | undefined; - /** - * If set to false any attempt to compile a WebAssembly module will throw a WebAssembly.CompileError. - * @default true - */ - wasm?: boolean | undefined; - } - | undefined; - /** - * If set to `afterEvaluate`, microtasks will be run immediately after the script has run. - */ - microtaskMode?: 'afterEvaluate' | undefined; - } - type MeasureMemoryMode = 'summary' | 'detailed'; - interface MeasureMemoryOptions { - /** - * @default 'summary' - */ - mode?: MeasureMemoryMode | undefined; - context?: Context | undefined; - } - interface MemoryMeasurement { - total: { - jsMemoryEstimate: number; - jsMemoryRange: [number, number]; - }; - } - /** - * Instances of the `vm.Script` class contain precompiled scripts that can be - * executed in specific contexts. - * @since v0.3.1 - */ - class Script { - constructor(code: string, options?: ScriptOptions); - /** - * Runs the compiled code contained by the `vm.Script` object within the given`contextifiedObject` and returns the result. Running code does not have access - * to local scope. - * - * The following example compiles code that increments a global variable, sets - * the value of another global variable, then execute the code multiple times. - * The globals are contained in the `context` object. - * - * ```js - * const vm = require('vm'); - * - * const context = { - * animal: 'cat', - * count: 2 - * }; - * - * const script = new vm.Script('count += 1; name = "kitty";'); - * - * vm.createContext(context); - * for (let i = 0; i < 10; ++i) { - * script.runInContext(context); - * } - * - * console.log(context); - * // Prints: { animal: 'cat', count: 12, name: 'kitty' } - * ``` - * - * Using the `timeout` or `breakOnSigint` options will result in new event loops - * and corresponding threads being started, which have a non-zero performance - * overhead. 
- * @since v0.3.1 - * @param contextifiedObject A `contextified` object as returned by the `vm.createContext()` method. - * @return the result of the very last statement executed in the script. - */ - runInContext(contextifiedObject: Context, options?: RunningScriptOptions): any; - /** - * First contextifies the given `contextObject`, runs the compiled code contained - * by the `vm.Script` object within the created context, and returns the result. - * Running code does not have access to local scope. - * - * The following example compiles code that sets a global variable, then executes - * the code multiple times in different contexts. The globals are set on and - * contained within each individual `context`. - * - * ```js - * const vm = require('vm'); - * - * const script = new vm.Script('globalVar = "set"'); - * - * const contexts = [{}, {}, {}]; - * contexts.forEach((context) => { - * script.runInNewContext(context); - * }); - * - * console.log(contexts); - * // Prints: [{ globalVar: 'set' }, { globalVar: 'set' }, { globalVar: 'set' }] - * ``` - * @since v0.3.1 - * @param contextObject An object that will be `contextified`. If `undefined`, a new object will be created. - * @return the result of the very last statement executed in the script. - */ - runInNewContext(contextObject?: Context, options?: RunningScriptOptions): any; - /** - * Runs the compiled code contained by the `vm.Script` within the context of the - * current `global` object. Running code does not have access to local scope, but _does_ have access to the current `global` object. - * - * The following example compiles code that increments a `global` variable then - * executes that code multiple times: - * - * ```js - * const vm = require('vm'); - * - * global.globalVar = 0; - * - * const script = new vm.Script('globalVar += 1', { filename: 'myfile.vm' }); - * - * for (let i = 0; i < 1000; ++i) { - * script.runInThisContext(); - * } - * - * console.log(globalVar); - * - * // 1000 - * ``` - * @since v0.3.1 - * @return the result of the very last statement executed in the script. - */ - runInThisContext(options?: RunningScriptOptions): any; - /** - * Creates a code cache that can be used with the `Script` constructor's`cachedData` option. Returns a `Buffer`. This method may be called at any - * time and any number of times. - * - * ```js - * const script = new vm.Script(` - * function add(a, b) { - * return a + b; - * } - * - * const x = add(1, 2); - * `); - * - * const cacheWithoutX = script.createCachedData(); - * - * script.runInThisContext(); - * - * const cacheWithX = script.createCachedData(); - * ``` - * @since v10.6.0 - */ - createCachedData(): Buffer; - /** @deprecated in favor of `script.createCachedData()` */ - cachedDataProduced?: boolean | undefined; - cachedDataRejected?: boolean | undefined; - cachedData?: Buffer | undefined; - } - /** - * If given a `contextObject`, the `vm.createContext()` method will `prepare - * that object` so that it can be used in calls to {@link runInContext} or `script.runInContext()`. Inside such scripts, - * the `contextObject` will be the global object, retaining all of its existing - * properties but also having the built-in objects and functions any standard [global object](https://es5.github.io/#x15.1) has. Outside of scripts run by the vm module, global variables - * will remain unchanged. 
- * - * ```js - * const vm = require('vm'); - * - * global.globalVar = 3; - * - * const context = { globalVar: 1 }; - * vm.createContext(context); - * - * vm.runInContext('globalVar *= 2;', context); - * - * console.log(context); - * // Prints: { globalVar: 2 } - * - * console.log(global.globalVar); - * // Prints: 3 - * ``` - * - * If `contextObject` is omitted (or passed explicitly as `undefined`), a new, - * empty `contextified` object will be returned. - * - * The `vm.createContext()` method is primarily useful for creating a single - * context that can be used to run multiple scripts. For instance, if emulating a - * web browser, the method can be used to create a single context representing a - * window's global object, then run all ` - - - diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Ben 10 Omniverse APK and Join the Galactic Battle.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Ben 10 Omniverse APK and Join the Galactic Battle.md deleted file mode 100644 index 08ad8a3fdf8a53e79450a5444a40e135c748bed7..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Ben 10 Omniverse APK and Join the Galactic Battle.md +++ /dev/null @@ -1,192 +0,0 @@ -
        -

        Ben 10 APK: How to Download and Play the Best Ben 10 Games on Your Android Device

        -

        Introduction

        -

        If you are a fan of Ben 10, the popular animated series that follows the adventures of a boy who can transform into different aliens using a mysterious device called the Omnitrix, you might be wondering how you can play some of the best Ben 10 games on your Android device. Well, you are in luck, because in this article, we will show you how to download and install Ben 10 APKs, which are files that allow you to run Android apps and games on your device without using the Google Play Store. We will also give you some recommendations for the best Ben 10 games that you can play on your Android device, and how to enjoy them to the fullest.

        -

        ben 10 apk


        DOWNLOAD ✦✦✦ https://ssurll.com/2uNQPi



        -

        What is Ben 10?

        -

        Ben 10 is a franchise created by Man of Action Studios and produced by Cartoon Network Studios. It consists of several animated series, movies, comic books, video games, and merchandise. The first series, simply titled Ben 10, debuted in 2005 and introduced the main character, Ben Tennyson, a ten-year-old boy who finds a mysterious device called the Omnitrix that allows him to transform into different aliens with various powers and abilities. Throughout the series, Ben uses his alien forms to fight evil forces, such as his archenemy Vilgax, an intergalactic warlord who wants to steal the Omnitrix for himself.

        -

        The franchise has spawned several sequels and spin-offs, such as Ben 10: Alien Force, Ben 10: Ultimate Alien, Ben 10: Omniverse, and Ben 10 (2016 reboot). Each series follows Ben at different stages of his life, as he gains new alien forms, allies, enemies, and challenges. The franchise has also produced several video games across different platforms, such as PlayStation, Xbox, Nintendo, PC, and mobile devices.

        -

        What is an APK?

        -

        An APK (short for Android Package Kit) is a file format that is used to distribute and install applications and games on Android devices. An APK file contains all the necessary components for an app or game to run on your device, such as code, resources, assets, certificates, and manifest. You can think of an APK file as a zip file that contains everything you need to install an app or game on your device.
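
        Because an APK is structurally just a zip archive, you can peek inside one from a desktop computer before installing it. The commands below are only an illustrative sketch: the file name ben10_game.apk is a made-up placeholder, and the second command assumes the Android SDK build-tools (which provide aapt) are installed.

```bash
# List the contents of an APK; it unpacks like any ordinary zip archive.
# "ben10_game.apk" is a placeholder for whatever APK file you downloaded.
unzip -l ben10_game.apk

# Optional: if the Android SDK build-tools are installed, aapt can print the
# package name, version and declared permissions from the manifest.
aapt dump badging ben10_game.apk
```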

        -

        Normally, when you want to install an app or game on your Android device, you would use the Google Play Store, which is the official app store for Android devices. However, sometimes you might want to install an app or game that is not available on the Google Play Store, or that has been removed or blocked by Google for some reason. In that case, you can use an APK file to install the app or game manually on your device.

        -

        Why download Ben 10 APKs?

        -

        There are several reasons why you might want to download and install Ben 10 APKs on your Android device. Here are some of them:

        -

        ben 10 up to speed apk
        -ben 10 challenge apk
        -ben 10 alien force apk
        -ben 10 ultimate alien apk
        -ben 10 omniverse apk
        -ben 10 protector of earth apk
        -ben 10 alien evolution apk
        -ben 10 power trip apk
        -ben 10 heroes apk
        -ben 10 vilgax attacks apk
        -ben 10 cosmic destruction apk
        -ben 10 alien experience apk
        -ben 10 mod apk
        -ben 10 wrath of psychobos apk
        -ben 10 reboot apk
        -ben 10 omniverse collection apk
        -ben 10 alien unlock apk
        -ben 10 game generator apk
        -ben 10 xenodrome plus apk
        -ben 10 ultimate challenge apk
        -ben 10 alien force vilgax attacks apk
        -ben 10 ultimate defense apk
        -ben 10 omnitrix power apk
        -ben 10 alien swarm smash apk
        -ben 10 ultimate crisis apk
        -ben 10 galactic racing apk
        -ben 10 omniverse rise of heroes apk
        -ben 10 alien escape apk
        -ben 10 savage pursuit apk
        -ben 10 battle ready apk
        -ben 10 fighting games apk
        -ben 10 coloring book apk
        -ben 10 puzzle games apk
        -ben 10 cartoon network games apk
        -ben 10 omniverse games free download for android apk
        -download game ppsspp iso android high compress terbaru full version gratis

        -
          -
        • You are a fan of Ben 10 and you want to play some of the best Ben 10 games that are not available on the Google Play Store.
        • -
        • You want to play some older or classic Ben 10 games that are no longer supported or updated by the developers.
        • -
        • You want to play some Ben 10 games that have been modified or hacked by fans to add new features, levels, characters, or cheats.
        • -
        • You want to play some Ben 10 games that are exclusive to certain regions or countries, and are not available in your location.
        • -
        • You want to save some storage space on your device by installing only the APK file of the game, and not the additional data or obb files that are usually required by the Google Play Store.
        • -
        -

        Whatever your reason is, downloading and installing Ben 10 APKs can be a fun and easy way to enjoy some of the best Ben 10 games on your Android device. However, you need to be careful and follow some steps to ensure that you are downloading and installing safe and reliable APK files, and not malware or viruses that can harm your device or compromise your privacy.

        -

        How to Download and Install Ben 10 APKs

        -

        Downloading and installing Ben 10 APKs is not a complicated process, but you need to follow some steps to make sure that you are doing it correctly and safely. Here are the steps that you need to follow:

        -

        Step 1: Find a reliable source for Ben 10 APKs

        -

        The first and most important step is to find a trustworthy and reputable source for Ben 10 APKs. There are many websites and apps that offer APK files for download, but not all of them are safe and reliable. Some of them might contain malware or viruses that can infect your device or steal your personal information. Some of them might also offer fake or outdated APK files that do not work properly or cause errors on your device.

        -

        To avoid these risks, you should always do some research before downloading any APK file from any source. You should check the reviews, ratings, comments, and feedback from other users who have downloaded the same APK file. You should also check the reputation and credibility of the website or app that offers the APK file. You should avoid any website or app that looks suspicious, has a lot of pop-up ads, asks for unnecessary permissions, or has a poor design or layout.

        -

        Some of the best sources for Ben 10 APKs are:

        -
          -
        • [APKPure]: This is one of the most popular and trusted websites for downloading APK files. It offers a large collection of Ben 10 games, as well as other apps and games for Android devices. It also provides detailed information about each APK file, such as size, version, developer, description, screenshots, and more. It also verifies each APK file with a signature verification system to ensure its safety and authenticity.
        • -
        • [APKMirror]: This is another well-known and reputable website for downloading APK files. It offers a variety of Ben 10 games, as well as other apps and games for Android devices. It also provides detailed information about each APK file, such as size, version, developer, description, screenshots, and more. It also verifies each APK file with a signature verification system to ensure its safety and authenticity.
        • -
        • [HappyMod]: This is a unique and interesting website for downloading APK files. It offers a lot of Ben 10 games that have been modified or hacked by fans to add new features, levels, characters, or cheats. It also provides detailed information about each APK file, such as size, version, developer, description, screenshots, and more. It also verifies each APK file with a signature verification system to ensure its safety and authenticity.
        • -
        -

        These are just some examples of reliable sources for Ben 10 APKs. You can also find other sources by doing some online research or asking for recommendations from other Ben 10 fans. However, you should always be careful and cautious when downloading any APK file from any source.

        -

        Step 2: Enable unknown sources on your Android device

        -

        The next step is to enable unknown sources on your Android device. This is a security setting that allows you to install apps and games from sources other than the Google Play Store. By default, this setting is disabled on most Android devices, which means that you cannot install any APK file on your device unless you enable it first.

        -

        To enable unknown sources on your Android device, you need to follow these steps:

        -
          -
        1. Go to the Settings app on your device.
        2. -
        3. Tap on Security or Privacy (depending on your device model).
        4. -
        5. Find and tap on Unknown sources or Install unknown apps (depending on your device model).
        6. -
        7. Toggle on the switch or check the box to allow installation from unknown sources.
        8. -
        9. A warning message will appear on your screen. Read it carefully and tap on OK or Confirm to proceed.
        10. -
        -

        Once you have enabled unknown sources on your device, you are ready to download and install Ben 10 APKs on your device.
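
        For reference, the legacy form of this setting can also be checked from a computer over ADB; this is only a hedged sketch and not part of the official steps, since on Android 8 and later the permission is granted per app rather than globally.

```bash
# Optional check from a PC (mainly useful on older Android versions):
# query the legacy "unknown sources" flag; it may return null on newer devices
# where the "Install unknown apps" permission is granted per app instead.
adb shell settings get secure install_non_market_apps
```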

        -

        Step 3: Download and install the Ben 10 APK of your choice

        -

        The final step is to download and install the Ben 10 APK of your choice on your device. To do this, you need to follow these steps:

        -
          -
        1. Go to the website or app that offers the Ben 10 APK that you want to download. For example, if you want to download Ben 10 Omniverse: Race Against Time, you can go to [APKPure] and search for it.
        2. -
        3. Tap on the Download or Install button to start downloading the APK file to your device. You might see a pop-up window asking you to confirm the download. Tap on OK or Yes to continue.
        4. -
        5. Once the download is complete, you will see a notification on your device. Tap on it to open the APK file.
        6. -
        7. You will see a screen asking you to install the app or game. Tap on Install to begin the installation process. You might see some permissions that the app or game requires. Tap on Accept or Allow to grant them.
        8. -
        9. Wait for the installation to finish. You will see a screen saying that the app or game has been installed successfully. Tap on Open to launch it, or Done to exit.
        10. -
        -

        Congratulations! You have successfully downloaded and installed a Ben 10 APK on your Android device. You can now enjoy playing some of the best Ben 10 games on your device.
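
        If you prefer working from a computer, the same APK can usually also be sideloaded over USB with the Android Debug Bridge (adb). This is only a hedged sketch rather than part of the steps above: it assumes adb is installed on your PC, USB debugging is enabled on your device, and the file name is a placeholder.

```bash
# Confirm the phone or tablet is detected over USB (USB debugging must be on).
adb devices

# Install the downloaded APK; "ben10_game.apk" is a placeholder file name.
adb install ben10_game.apk

# Use -r to reinstall or update a game that is already installed.
adb install -r ben10_game.apk
```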

        -

        How to Play Ben 10 Games on Your Android Device

        -

        Now that you have downloaded and installed a Ben 10 APK on your device, you might be wondering how to play it and what features it has. In this section, we will give you some recommendations for the best Ben 10 games that you can play on your Android device, and how to play them. We will also provide tables summarizing some of the features and gameplay of each game.

        -

        Ben 10 Omniverse: Race Against Time

        -

        This is one of the most popular and fun Ben 10 games that you can play on your Android device. It is based on the Ben 10: Omniverse series, which follows Ben as he teams up with his partner Rook and his younger self from the original series to fight against new and old enemies. In this game, you can control both Ben and Rook as they race against time to stop Malware, a corrupted Galvanic Mechamorph who wants to destroy all life in the universe.

        -

        Features

        -

        Some of the features of this game are:

        -
          -
        • You can transform into different aliens using the Omnitrix, such as Heatblast, Diamondhead, Four Arms, XLR8, Feedback, Gravattack, Bloxx, Shocksquatch, and more.
        • -
        • You can use different weapons and gadgets as Rook, such as Proto-Tool, Grappler, Laser Lance, Magno Boots, and more.
        • -
        • You can explore different locations from the series, such as Undertown, Plumber Base, Bellwood, Galvan Prime, Khyber's Ship, and more.
        • -
        • You can fight against different enemies from the series, such as Malware, Khyber, Zombozo, Psyphon, Dr. Animo, and more.
        • -
        • You can collect different items and power-ups along the way, such as Omnitrix energy, health packs, coins, badges, trophies, and more.
        • -
        • You can unlock different achievements and rewards as you progress through the game.
        • -
        -

        The following table shows some of the aliens that you can transform into in this game:

        | Alien | Power | Weakness |
        | --- | --- | --- |
        | Heatblast | Fire blasts, fireballs, fire walls | Water |
        | Diamondhead | Crystal projectiles, crystal shields | Sonic waves |
        | Four Arms | Super strength, shockwaves | Electricity |
        | XLR8 | Super speed, sharp claws | Ice |
        | Feedback | Energy absorption, energy blasts | Energy overload |
        | Gravattack | Gravity manipulation, levitation | Magnetism |
        | Bloxx | Shape-shifting, building blocks | Acid |
        | Shocksquatch | Electricity generation, enhanced strength | Insulation |

        How to play

        -

        To play this game on your Android device, you need to follow these steps:

        -
          -
        1. Launch the game from your device's menu or home screen.
        2. -
        3. Select a language from the options available.
        4. -
        5. Tap on Play to start a new game, or Continue to resume a previous game.
        6. -
        7. Select a difficulty level from Easy, Normal, or Hard.
        8. -
        9. Select a level from the map screen. You can only select the levels that you have unlocked or completed before.
        10. -
        11. Follow the instructions and tips on the screen to play the game. You can use the virtual buttons on the screen to control your character, such as move, jump, attack, transform, switch, and pause.
        12. -
        13. Complete the objectives of each level, such as reaching the end point, defeating enemies, collecting items, and more.
        14. -
        15. Watch the cutscenes and dialogues between the characters to follow the story of the game.
        16. -
        17. After completing each level, you will see your score and performance. You can also see your achievements and rewards that you have unlocked.
        18. -
        19. Tap on Next to proceed to the next level, or Replay to play the same level again.
        20. -
        -

        You can also access the Options menu from the main menu or the pause menu. Here, you can adjust the sound, music, language, and controls of the game. You can also view your achievements and rewards that you have unlocked in the game.

        -

        Ben 10 Alien Force: Vilgax Attacks

        -

        This is another great Ben 10 game that you can play on your Android device. It is based on the Ben 10: Alien Force series, which follows Ben as he leads a team of young superheroes called the Alien Force, consisting of his cousin Gwen and his former enemy Kevin. In this game, you can control Ben as he battles against Vilgax, an intergalactic warlord who wants to conquer the universe using a powerful weapon called the Null Void Projector.

        -

        Features

        -

        Some of the features of this game are:

        -
          -
        • You can transform into different aliens using the Omnitrix, such as Swampfire, Echo Echo, Humungousaur, Jetray, Big Chill, Spidermonkey, Goop, Chromastone, Brainstorm, and Cannonbolt.
        • -
        • You can explore different planets from the series, such as Vulpin, Terradino, Anur Phaetos, Encephalonus IV, Mor' Otesi, and Vilgaxia.
        • -
        • You can fight against different enemies from the series, such as Vilgax, Psyphon, Sixsix, Vulkanus, Albedo, Darkstar, and more.
        • -
        • You can collect different items and power-ups along the way, such as Omnitrix energy, health packs, coins, Sumo Slammer cards, and more.
        • -
        • You can unlock different achievements and rewards as you progress through the game.
        • -
        -

        The following table shows some of the aliens that you can transform into in this game:

        | Alien | Power | Weakness |
        | --- | --- | --- |
        | Swampfire | Fire blasts, plant growth | Ice |
        | Echo Echo | Sound waves, cloning | Electricity |
        | Humungousaur | Super strength, growth | Sonic waves |
        | Jetray | Flight, laser beams | Magnetism |
        | Big Chill | Ice breath, intangibility | Fire |
        | Spidermonkey | Web shooting, agility | Strength |
        | Goop | Shape-shifting, acid | Anti-gravity projector |
        | Chromastone | Energy absorption, energy blasts | Cracks |
        | Brainstorm | Electricity generation, telekinesis | Water |
        | Cannonbolt | Rolling, armor | Speed |

        How to play

        -

        To play this game on your Android device, you need to follow these steps:

        -
          -
        1. Launch the game from your device's menu or home screen.
        2. -
        3. Select a language from the options available.
        4. -
        5. Tap on Play to start a new game, or Continue to resume a previous game.
        6. -
        7. Select a difficulty level from Easy, Normal, or Hard.
        8. -
        9. Select a planet from the map screen. You can only select the planets that you have unlocked or completed before.
        10. -
        11. Follow the instructions and tips on the screen to play the game. You can use the virtual buttons on the screen to control your character, such as move, jump, attack, transform, and pause.
        12. -
        13. Complete the objectives of each planet, such as reaching the end point, defeating enemies, collecting items, and more.
        14. -
        15. Watch the cutscenes and dialogues between the characters to follow the story of the game.
        16. -
        17. After completing each planet, you will see your score and performance. You can also see your achievements and rewards that you have unlocked.
        18. -
        19. Tap on Next to proceed to the next planet, or Replay to play the same planet again.
        20. -
        -

        You can also access the Options menu from the main menu or the pause menu. Here, you can adjust the sound, music, language, and controls of the game. You can also view your achievements and rewards that you have unlocked in the game.

        -

        Ben 10 Ultimate Alien: Cosmic Destruction

        -

        This is another awesome Ben 10 game that you can play on your Android device. It is based on the Ben 10: Ultimate Alien series, which follows Ben as he gains access to a new feature of the Omnitrix called the Ultimatrix, which allows him to evolve his alien forms into more powerful versions called Ultimate Forms. In this game, you can control Ben as he travels across different countries and planets to find ancient alien artifacts that can help him stop a cosmic storm that threatens to destroy the universe.

        -

        Features

        -

        Some of the features of this game are:

        -
          -
        • You can transform into different aliens using the Ultimatrix, such as Swampfire, Echo Echo, Humungousaur, Jetray, Big Chill, Spidermonkey , Goop, Chromastone, Brainstorm, and Cannonbolt. You can also unlock and use their Ultimate Forms, such as Ultimate Swampfire, Ultimate Echo Echo, Ultimate Humungousaur, Ultimate Big Chill, Ultimate Spidermonkey, and more.
        • -
        • You can explore different locations from the series, such as Tokyo, Paris, Rome, Great Wall of China, Amazon Rainforest, Colosseum, Catacombs, and more.
        • -
        • You can fight against different enemies from the series, such as Aggregor, Zombozo, Vulkanus, Enoch, Sunder, and more.
        • -
        • You can collect different items and power-ups along the way, such as Omnitrix energy, health packs, coins, Sumo Slammer cards, and more.
        • -
        • You can unlock different achievements and rewards as you progress through the game.
        • -
        -

        The following table shows some of the aliens that you can transform into in this game:

        | Alien | Power | Weakness |
        | --- | --- | --- |
        | Swampfire | Fire blasts, plant growth | Ice |
        | Echo Echo | Sound waves, cloning | Electricity |
        | Humungousaur | Super strength, growth | Sonic waves |
        | Jetray | Flight, laser beams | Magnetism |
        | Big Chill | Ice breath, intangibility | Fire |
        | Spidermonkey | Web shooting, agility | Strength |
        | Goop | Shape-shifting, acid | Anti-gravity projector |
        | Chromastone | Energy absorption, energy blasts | Cracks |
        | Brainstorm | Electricity generation, telekinesis | Water |
        | Cannonbolt | Rolling, armor | Speed |

        The following table shows some of the Ultimate Forms that you can use in this game:

        | Ultimate Form | Power | Weakness |
        | --- | --- | --- |
        | Ultimate Swampfire | Enhanced fire blasts, explosive seeds | Ice |
        | Ultimate Echo Echo | Enhanced sound waves, sonic disks | Electricity |
        | Ultimate Humungousaur | Enhanced strength, missile launchers | Sonic waves |
        | Ultimate Big Chill | Enhanced ice breath, fire breath | Fire, ice |
        | Ultimate Spidermonkey | Enhanced web shooting, strength, agility | Strength, speed |
        | Ultimate Goop | Enhanced shape-shifting, acid, anti-gravity projector | Anti-gravity projector |
        | Ultimate Chromastone | Enhanced energy absorption, energy blasts, flight | Cracks |
        | Ultimate Brainstorm | Enhanced electricity generation, telekinesis, intelligence | Water |

        How to play

        -

        To play this game on your Android device, you need to follow these steps:

        -
          -
        1. Launch the game from your device's menu or home screen.
        2. -
        3. Select a language from the options available.
        4. -
        5. Tap on Play to start a new game, or Continue to resume a previous game.
        6. -
        7. Select a difficulty level from Easy, Normal, or Hard.
        8. -
        9. Select a location from the map screen. You can only select the locations that you have unlocked or completed before.
        10. -
        11. Follow the instructions and tips on the screen to play the game. You can use the virtual buttons on the screen to control your character, such as move, jump, attack, transform, and pause.
        12. -
        13. Complete the objectives of each location, such as reaching the end point, defeating enemies, collecting items, and more.
        14. -
        15. Watch the cutscenes and dialogues between the characters to follow the story of the game.
        16. -
        17. After completing each location, you will see your score and performance. You can also see your achievements and rewards that you have unlocked.
        18. -
        19. Tap on Next to proceed to the next location, or Replay to play the same location again.
        20. -
        -

        You can also access the Options menu from the main menu or the pause menu. Here, you can adjust the sound, music, language, and controls of the game. You can also view your achievements and rewards that you have unlocked in the game.

        -

        Conclusion

        -

        In conclusion, Ben 10 APKs are a great way to enjoy some of the best Ben 10 games on your Android device. You can download and install them easily and safely by following some simple steps. You can also play them and have fun with different aliens, locations, enemies, and features. However, you should always be careful and cautious when downloading any APK file from any source. You should also respect the rights and property of the developers and creators of the games. We hope that this article has helped you learn more about Ben 10 APKs and how to download and play them on your Android device. If you have any questions or comments, please feel free to share them with us below.

        -

        FAQs

        -

        Here are some of the frequently asked questions about Ben 10 APKs:

        -
          -
        1. Are Ben 10 APKs legal?
        2. -

          Ben 10 APKs are legal as long as they are not pirated or modified versions of the original games. If you download and install Ben 10 APKs from reliable and reputable sources that offer genuine and authentic APK files of the games, then you are not breaking any law. However, if you download and install Ben 10 APKs from dubious and shady sources that offer pirated or modified versions of the games that violate the rights and property of the developers and creators of the games , then you are breaking the law and risking legal consequences. Therefore, you should always be careful and cautious when downloading any APK file from any source.

          -
        3. Are Ben 10 APKs safe?
        4. -

          Ben 10 APKs are safe as long as they are not infected with malware or viruses that can harm your device or compromise your privacy. If you download and install Ben 10 APKs from reliable and reputable sources that verify and scan each APK file with a signature verification system to ensure its safety and authenticity, then you are not exposing your device or data to any danger. However, if you download and install Ben 10 APKs from dubious and shady sources that do not verify or scan each APK file with a signature verification system, then you are exposing your device or data to potential threats. Therefore, you should always be careful and cautious when downloading any APK file from any source.

          -
        5. Do Ben 10 APKs require root access?
        6. -

          Ben 10 APKs do not require root access to run on your Android device. Root access is a process that allows you to gain full control over your device's system and settings, which can be useful for some advanced users who want to customize their device or install certain apps or games that are not compatible with their device. However, root access also comes with some risks and disadvantages, such as voiding your device's warranty, making your device more vulnerable to malware or viruses, or causing your device to malfunction or brick. Therefore, root access is not recommended for most users who just want to enjoy some Ben 10 games on their device. Ben 10 APKs can run smoothly and safely on your device without requiring root access.

          -
        7. Can I play Ben 10 APKs offline?
        8. -

          Ben 10 APKs can be played offline without requiring an internet connection. However, some Ben 10 APKs might require an internet connection for some features or functions, such as downloading additional data or obb files, updating the game, accessing online leaderboards or multiplayer modes, or syncing your progress or achievements with your account. Therefore, you should always check the requirements and specifications of each Ben 10 APK before downloading and installing it on your device.

          -
        9. Can I play Ben 10 APKs with friends?
        10. -

          Ben 10 APKs can be played with friends in different ways, depending on the game. Some Ben 10 games offer multiplayer modes that allow you to play with or against other players online or locally. For example, Ben 10 Omniverse: Race Against Time offers a co-op mode that allows you to play with another player on the same device, and a versus mode that allows you to play against another player on a different device via Bluetooth or Wi-Fi. Some Ben 10 games also offer social features that allow you to share your score or achievements with your friends on social media platforms, such as Facebook or Twitter. For example, Ben 10 Alien Force: Vilgax Attacks offers a Facebook integration that allows you to post your score or achievements on your Facebook wall. Therefore, you can enjoy playing Ben 10 games with your friends in different ways.

          -
          -
          \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Power of Technology in the Medical Field with RT-PCR App on Windows 10.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Power of Technology in the Medical Field with RT-PCR App on Windows 10.md deleted file mode 100644 index 2bdcbc822312e909fd59595312eafe173520f9d3..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Experience the Power of Technology in the Medical Field with RT-PCR App on Windows 10.md +++ /dev/null @@ -1,123 +0,0 @@ - -

          RT-PCR app download for Windows 10: A guide for medical staff

          -

          If you are a member of the medical staff working at a sample collection centre for COVID-19 testing, you may have heard of the RT-PCR app. This app is a handheld tool that helps you send samples for testing, view collection details and get advance intimation from ICMR. But how can you use this app on your Windows 10 device? In this article, we will explain what RT-PCR is, what the RT-PCR app is, how it works, and how to download and install it on your Windows 10 PC or laptop.

          -

          What is RT-PCR and why is it important for COVID-19 testing?

          -

          RT-PCR is a laboratory technique that detects the genetic material of the virus

          -

          RT-PCR stands for reverse transcription polymerase chain reaction. It is a laboratory technique that combines reverse transcription of RNA into DNA and amplification of specific DNA targets using polymerase chain reaction. It is primarily used to measure the amount of a specific RNA, such as the RNA of a virus.

          -

          rt-pcr app download for windows 10


          DOWNLOADhttps://ssurll.com/2uO0El



          -

          RT-PCR is one of the most accurate and widely used methods for diagnosing COVID-19

          -

          COVID-19 is caused by a novel coronavirus called SARS-CoV-2. This virus carries its genetic material as RNA and relies on infiltrating healthy cells to multiply and survive. By using RT-PCR, scientists can detect the presence of SARS-CoV-2 RNA in a sample, such as a nasal swab or saliva. This indicates that the person is currently infected with the virus.

          -

          RT-PCR is one of the most accurate and widely used methods for diagnosing COVID-19 because it has high sensitivity and specificity. Sensitivity means that it can detect very low levels of viral RNA in a sample, which reduces the chances of false negative results. Specificity means that it can distinguish between different types of viral RNA, which reduces the chances of false positive results.

          -

          How to download RT-PCR app on PC with emulator
          -RT-PCR app for Windows 10 free download
          -Best emulator for RT-PCR app on PC
          -RT-PCR app for PC latest version
          -RT-PCR app for Windows 10 64 bit
          -RT-PCR app for PC offline installer
          -RT-PCR app for Windows 10 laptop
          -RT-PCR app for PC review
          -RT-PCR app for Windows 10 update
          -RT-PCR app for PC download link
          -RT-PCR app for Windows 10 pro
          -RT-PCR app for PC system requirements
          -RT-PCR app for Windows 10 troubleshooting
          -RT-PCR app for PC features
          -RT-PCR app for Windows 10 compatibility
          -RT-PCR app for PC tutorial
          -RT-PCR app for Windows 10 installation guide
          -RT-PCR app for PC benefits
          -RT-PCR app for Windows 10 alternatives
          -RT-PCR app for PC FAQs

          -

          What is the RT-PCR app and how does it work?

          -

          The RT-PCR app is a tool for medical staff at sample collection centres

          -

          The RT-PCR app is a mobile application developed by the Indian Council of Medical Research (ICMR) in collaboration with the National Informatics Centre (NIC). It is a tool for medical staff working at sample collection centres for COVID-19 testing across India.

          -

          The RT-PCR app allows users to send samples for testing, view collection details and get advance intimation from ICMR

          -

          The RT-PCR app has several features that make the sample collection process easier and faster. Users can scan the barcode of the sample tube and enter the patient details, such as name, age, gender, address, contact number, travel history and symptoms. Users can also select the testing laboratory from a list of ICMR-approved labs and send the sample for testing. Users can view the collection details, such as date, time, location and status of the sample. Users can also get advance intimation from ICMR about the test results and download the test reports.

          -

          The RT-PCR app is not meant for individuals or patients undergoing the test

          -

          The RT-PCR app is only meant for medical staff working at sample collection centres. It is not meant for individuals or patients undergoing the test. Individuals or patients can check their test results on the ICMR portal or through SMS alerts. They can also download the Aarogya Setu app, which is a contact tracing and self-assessment app for COVID-19.

          -

          How to download and install the RT-PCR app on Windows 10?

          -

          The RT-PCR app is available for Android and iOS devices, but not for Windows 10

          -

          The RT-PCR app is compatible with Android and iOS devices. It can be downloaded from the Google Play Store or the App Store for free. However, the RT-PCR app is not available for Windows 10 devices such as PCs or laptops.

          -

          To use the RT-PCR app on Windows 10, users need to install an emulator such as LDPlayer or BlueStacks

          -

          An emulator is software that allows users to run Android apps on their PC or Mac. There are many emulators available online, but some of the most popular ones are LDPlayer and BlueStacks. These emulators are easy to install and use, and they have high performance and compatibility with most Android apps.

          -

          The emulator allows users to run Android apps on their PC or Mac

          -

          By using an emulator, users can enjoy the features and benefits of Android apps on their PC or Mac. They can access a larger screen, keyboard and mouse, better graphics and sound quality, and more storage space. They can also use multiple apps at the same time, switch between apps easily, and customize their settings according to their preferences.

          Steps to download and install LDPlayer

          -

          To download and install LDPlayer on your Windows 10 device, follow these steps:

          -
            -
          1. Go to the official website of LDPlayer and click on the download button.
          2. -
          3. Wait for the file to download and then run it as an administrator.
          4. -
          5. Follow the instructions on the screen and agree to the terms and conditions.
          6. -
          7. Select the destination folder and click on install.
          8. -
          9. Wait for the installation to complete and then launch LDPlayer.
          10. -
          -

          Steps to download and install BlueStacks

          -

          To download and install BlueStacks on your Windows 10 device, follow these steps:

          -
            -
          1. Go to the official website of BlueStacks and click on the download button.
          2. -
          3. Wait for the file to download and then run it as an administrator.
          4. -
          5. Follow the instructions on the screen and agree to the terms and conditions.
          6. -
          7. Select the destination folder and click on install.
          8. -
          9. Wait for the installation to complete and then launch BlueStacks.
          10. -
          -

          Steps to download and install RT-PCR app using the emulator

          -

          To download and install the RT-PCR app using the emulator, follow these steps (a command-line alternative is sketched after the list):

          -
            -
          1. Open the emulator and sign in with your Google account.
          2. -
          3. Go to the Google Play Store inside the emulator and search for the RT-PCR app.
          4. -
          5. Select the app from the list of results and click on install.
          6. -
          7. Wait for the app to download and install on your emulator.
          8. -
          9. Open the app and sign in with your ICMR credentials.
          10. -
          -
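
          As mentioned above, there is also a command-line alternative. Most Android emulators expose an ADB endpoint, so an APK file saved on the PC can be installed into the running emulator directly. This is only a hedged sketch under assumptions not stated in the article: the port number varies between emulators and versions, and the file name is a placeholder, since the RT-PCR app is normally obtained from the Play Store.

```bash
# Connect adb to the running emulator; 127.0.0.1:5555 is only an example,
# check your emulator's documentation for its actual ADB port.
adb connect 127.0.0.1:5555

# Install a locally saved APK into the emulator; the file name is a placeholder.
adb install rtpcr_app.apk
```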

          Conclusion

          -

          The RT-PCR app is a useful tool for medical staff involved in COVID-19 testing

          -

          The RT-PCR app is a mobile application that helps medical staff at sample collection centres for COVID-19 testing. It allows users to send samples for testing, view collection details and get advance intimation from ICMR. It also helps users to access test reports and track the status of samples. The RT-PCR app is one of the initiatives taken by ICMR to improve the efficiency and quality of COVID-19 testing in India.

          -

The RT-PCR app can be used on Windows 10 with the help of an emulator

          -

The RT-PCR app is compatible with Android and iOS devices, but not with Windows 10 devices. However, users can still use the app on their Windows 10 PC or laptop with the help of an emulator. An emulator is software that allows users to run Android apps on their PC or Mac. Users can choose from various emulators available online, such as LDPlayer or BlueStacks. Users can download and install the emulator on their Windows 10 device, and then download and install the RT-PCR app using the emulator. This way, users can enjoy the features and benefits of the RT-PCR app on their Windows 10 device.

          -

          The RT-PCR app is not a substitute for professional medical advice or diagnosis

          -

          The RT-PCR app is a tool for medical staff working at sample collection centres, not for individuals or patients undergoing the test. The app does not provide any medical advice or diagnosis, nor does it guarantee the accuracy or reliability of the test results. Users should always consult a qualified medical professional before making any health-related decisions based on the test results. Users should also follow all the safety precautions and guidelines issued by ICMR, WHO and other authorities regarding COVID-19 testing.

          -

          FAQs

          -

          What are the benefits of using the RT-PCR app?

          -

          Some of the benefits of using the RT-PCR app are:

          -
            -
• It reduces human errors and delays in sample collection and testing.
• It improves data quality and security by eliminating paper-based records.
• It enhances communication and coordination between sample collection centres and testing laboratories.
• It provides real-time information and updates on sample status and test results.
• It supports data analysis and reporting for COVID-19 surveillance and management.
          -

          What are the limitations of using the RT-PCR app?

          -

          Some of the limitations of using the RT-PCR app are:

          -
            -
• It requires a stable internet connection and a compatible device to function properly.
• It may encounter technical glitches or errors that may affect its performance or accuracy.
• It may not be compatible with all the testing laboratories or protocols in India.
• It may not reflect the latest updates or changes in the COVID-19 testing guidelines or criteria.
• It may not be accessible or user-friendly for all the medical staff or patients.
          -

How to update the RT-PCR app on Windows 10?

          -

To update the RT-PCR app on Windows 10, follow these steps:

          -
            -
1. Open the emulator and launch the RT-PCR app.
2. Go to the settings menu and check for updates.
3. If there is an update available, click on download and install.
4. Wait for the update to complete and restart the app.
          -

How to troubleshoot the RT-PCR app on Windows 10?

          -

If you encounter any problems or issues with the RT-PCR app on Windows 10, you can try these solutions:

          -
            -
• Check your internet connection and make sure it is stable and fast.
• Check your device and make sure it has enough storage space and battery power.
• Check your emulator and make sure it is updated and configured correctly.
• Check your RT-PCR app and make sure it is updated and signed in with your ICMR credentials.
• Clear the cache and data of the app and restart it.
• Uninstall and reinstall the app using the emulator.
• Contact the support team of the RT-PCR app through email or phone.
          -

          How to contact the support team of the RT-PCR app?

          -

          If you have any questions, feedback or complaints about the RT-PCR app, you can contact the support team of the app through these channels:

          -
            -
• Email: rtpcrapp.support@icmr.gov.in
• Phone: 011-26589699 / 011-26589794 / 011-26589336 / 011-26588707

          -
          -
          \ No newline at end of file diff --git a/spaces/snowr3/hakurei-waifu-diffusion/README.md b/spaces/snowr3/hakurei-waifu-diffusion/README.md deleted file mode 100644 index 3c1de756d7daa1a4acf98216d89f7ab89c5c0705..0000000000000000000000000000000000000000 --- a/spaces/snowr3/hakurei-waifu-diffusion/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Hakurei Waifu Diffusion -emoji: 🐢 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/soggys/pompoms/Dockerfile b/spaces/soggys/pompoms/Dockerfile deleted file mode 100644 index eef259fa372a804549fb0af0913718a13344da34..0000000000000000000000000000000000000000 --- a/spaces/soggys/pompoms/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM node:18-bullseye-slim -RUN apt-get update && \ - apt-get install -y git -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app -WORKDIR /app -RUN npm install -COPY Dockerfile greeting.md* .env* ./ -RUN npm run build -EXPOSE 7860 -ENV NODE_ENV=production -CMD [ "npm", "start" ] diff --git a/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/cantonese.py b/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/cantonese.py deleted file mode 100644 index b66d12138b81b70b86f18217d24a08fce76305c0..0000000000000000000000000000000000000000 --- a/spaces/songwy/VITS-Umamusume-voice-synthesizer/text/cantonese.py +++ /dev/null @@ -1,59 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('jyutjyu') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ei˥'), - ('B', 'biː˥'), - ('C', 'siː˥'), - ('D', 'tiː˥'), - ('E', 'iː˥'), - ('F', 'e˥fuː˨˩'), - ('G', 'tsiː˥'), - ('H', 'ɪk̚˥tsʰyː˨˩'), - ('I', 'ɐi˥'), - ('J', 'tsei˥'), - ('K', 'kʰei˥'), - ('L', 'e˥llou˨˩'), - ('M', 'ɛːm˥'), - ('N', 'ɛːn˥'), - ('O', 'ou˥'), - ('P', 'pʰiː˥'), - ('Q', 'kʰiːu˥'), - ('R', 'aː˥lou˨˩'), - ('S', 'ɛː˥siː˨˩'), - ('T', 'tʰiː˥'), - ('U', 'juː˥'), - ('V', 'wiː˥'), - ('W', 'tʊk̚˥piː˥juː˥'), - ('X', 'ɪk̚˥siː˨˩'), - ('Y', 'waːi˥'), - ('Z', 'iː˨sɛːt̚˥') -]] - - -def number_to_cantonese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: cn2an.an2cn(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def cantonese_to_ipa(text): - text = number_to_cantonese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/sophiamyang/Panel_InstructPix2Pix/README.md b/spaces/sophiamyang/Panel_InstructPix2Pix/README.md deleted file mode 100644 index c7661a37b8b5f8b9f2d064fff0a09e7441965c12..0000000000000000000000000000000000000000 --- a/spaces/sophiamyang/Panel_InstructPix2Pix/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Panel InstructPix2Pix -emoji: 🏃 -colorFrom: purple -colorTo: purple -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sophiamyang/Panel_apps/text_analysis.py b/spaces/sophiamyang/Panel_apps/text_analysis.py deleted file mode 100644 index a3e7a09c156546b0c8bb96793dfb625fcab1876a..0000000000000000000000000000000000000000 --- a/spaces/sophiamyang/Panel_apps/text_analysis.py +++ /dev/null @@ -1,352 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# In[1]: - - -import panel as pn -import requests -import pandas as pd -from textblob import TextBlob -pn.extension() -pn.extension('tabulator') -import warnings -warnings.filterwarnings('ignore') - - -# In[2]: - - -sample_text = """ -Happiness is a very complicated thing. Happiness can be used both in emotional or mental state context and can vary largely from a feeling from contentment to very intense feeling of joy. It can also mean a life of satisfaction, good well-being and so many more. Happiness is a very difficult phenomenon to use words to describe as it is something that can be felt only. Happiness is very important if we want to lead a very good life. Sadly, happiness is absent from the lives of a lot of people nowadays. We all have our own very different concept of happiness. Some of us are of the opinion that we can get happiness through money, others believe they can only get true happiness in relationships, some even feel that happiness can only be gotten when they are excelling in their profession. -As we might probably know, happiness is nothing more than the state of one being content and happy. A lot of people in the past, present and some (even in the future will) have tried to define and explain what they think happiness really is. So far, the most reasonable one is the one that sees happiness as something that can only come from within a person and should not be sought for outside in the world. -Some very important points about happiness are discussed below: -1. Happiness can’t be bought with Money: -A lot of us try to find happiness where it is not. We associate and equate money with happiness. If at all there is happiness in money then all of the rich people we have around us would never feel sad. What we have come to see is that even the rich amongst us are the ones that suffer depression, relationship problems, stress, fear and even anxiousness. A lot of celebrities and successful people have committed suicide, this goes a long way to show that money or fame does not guarantee happiness. This does not mean that it is a bad thing to be rich and go after money. When you have money, you can afford many things that can make you and those around you very happy. -2. Happiness can only come from within: -There is a saying that explains that one can only get true happiness when one comes to the realisation that only one can make himself/herself happy. We can only find true happiness within ourselves and we can’t find it in other people. 
This saying and its meaning is always hammered on in different places but we still refuse to fully understand it and put it into good use. It is very important that we understand that happiness is nothing more than the state of a person’s mind. Happiness cannot come from all the physical things we see around us. Only we through our positive emotions that we can get through good thoughts have the ability to create true happiness. -Our emotions are created by our thoughts. Therefore, it is very important that we work on having only positive thoughts and this can be achieved when we see life in a positive light.""" - - -# In[3]: - - -# from nltk.corpus import stopwords -# stoplist = stopwords.words('english') + ['though'] -stoplist = ['i', - 'me', - 'my', - 'myself', - 'we', - 'our', - 'ours', - 'ourselves', - 'you', - "you're", - "you've", - "you'll", - "you'd", - 'your', - 'yours', - 'yourself', - 'yourselves', - 'he', - 'him', - 'his', - 'himself', - 'she', - "she's", - 'her', - 'hers', - 'herself', - 'it', - "it's", - 'its', - 'itself', - 'they', - 'them', - 'their', - 'theirs', - 'themselves', - 'what', - 'which', - 'who', - 'whom', - 'this', - 'that', - "that'll", - 'these', - 'those', - 'am', - 'is', - 'are', - 'was', - 'were', - 'be', - 'been', - 'being', - 'have', - 'has', - 'had', - 'having', - 'do', - 'does', - 'did', - 'doing', - 'a', - 'an', - 'the', - 'and', - 'but', - 'if', - 'or', - 'because', - 'as', - 'until', - 'while', - 'of', - 'at', - 'by', - 'for', - 'with', - 'about', - 'against', - 'between', - 'into', - 'through', - 'during', - 'before', - 'after', - 'above', - 'below', - 'to', - 'from', - 'up', - 'down', - 'in', - 'out', - 'on', - 'off', - 'over', - 'under', - 'again', - 'further', - 'then', - 'once', - 'here', - 'there', - 'when', - 'where', - 'why', - 'how', - 'all', - 'any', - 'both', - 'each', - 'few', - 'more', - 'most', - 'other', - 'some', - 'such', - 'no', - 'nor', - 'not', - 'only', - 'own', - 'same', - 'so', - 'than', - 'too', - 'very', - 's', - 't', - 'can', - 'will', - 'just', - 'don', - "don't", - 'should', - "should've", - 'now', - 'd', - 'll', - 'm', - 'o', - 're', - 've', - 'y', - 'ain', - 'aren', - "aren't", - 'couldn', - "couldn't", - 'didn', - "didn't", - 'doesn', - "doesn't", - 'hadn', - "hadn't", - 'hasn', - "hasn't", - 'haven', - "haven't", - 'isn', - "isn't", - 'ma', - 'mightn', - "mightn't", - 'mustn', - "mustn't", - 'needn', - "needn't", - 'shan', - "shan't", - 'shouldn', - "shouldn't", - 'wasn', - "wasn't", - 'weren', - "weren't", - 'won', - "won't", - 'wouldn', - "wouldn't", - 'though'] - - -# In[4]: - - -def get_sentiment(text): - return pn.pane.Markdown(f""" - Polarity (range from -1 negative to 1 positive): {TextBlob(text).polarity} \n - Subjectivity (range from 0 objective to 1 subjective): {TextBlob(text).subjectivity} - """) - - -# In[5]: - - -def get_ngram(text): - from sklearn.feature_extraction.text import CountVectorizer - c_vec = CountVectorizer(stop_words=stoplist, ngram_range=(2,3)) - # matrix of ngrams - try: - ngrams = c_vec.fit_transform([text]) - except ValueError: # if less than 2 words, return empty result - return pn.widgets.Tabulator(width=600) - # count frequency of ngrams - count_values = ngrams.toarray().sum(axis=0) - # list of ngrams - vocab = c_vec.vocabulary_ - df_ngram = pd.DataFrame(sorted([(count_values[i],k) for k,i in vocab.items()], reverse=True) - ).rename(columns={0: 'frequency', 1:'bigram/trigram'}) - df_ngram['polarity'] = df_ngram['bigram/trigram'].apply(lambda x: TextBlob(x).polarity) - 
df_ngram['subjective'] = df_ngram['bigram/trigram'].apply(lambda x: TextBlob(x).subjectivity) - return pn.widgets.Tabulator(df_ngram, width=600, height=300) - - -# In[6]: - - -def get_ntopics(text, ntopics): - from sklearn.feature_extraction.text import TfidfVectorizer - from sklearn.decomposition import NMF - from sklearn.pipeline import make_pipeline - tfidf_vectorizer = TfidfVectorizer(stop_words=stoplist, ngram_range=(2,3)) - nmf = NMF(n_components=ntopics) - pipe = make_pipeline(tfidf_vectorizer, nmf) - try: - pipe.fit([text]) - except ValueError: # if less than 2 words, return empty result - return - message = "" - for topic_idx, topic in enumerate(nmf.components_): - message += "####Topic #%d: " % topic_idx - message += ", ".join([tfidf_vectorizer.get_feature_names()[i] - for i in topic.argsort()[:-3 - 1:-1]]) - message += "\n" - return pn.pane.Markdown(message) - - -# In[7]: - - -explanation = pn.pane.Markdown(""" -This app provides a simple text analysis for a given input text or text file. \n -- Sentiment analysis uses [TextBlob](https://textblob.readthedocs.io/). -- N-gram analysis uses [scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) to see which words show up together. -- Topic modeling uses [scikit-learn](https://scikit-learn.org/stable/auto_examples/applications/plot_topics_extraction_with_nmf_lda.html) NMF model and we can change the number of topics we'd like to see in the result. -""") - -def get_text_results(_): - return pn.Column( - explanation, - pn.pane.Markdown(""" - ##Sentiment analysis:"""), - get_sentiment(text_widget.value.replace("\n", "")), - pn.pane.Markdown("##N-gram analysis (bigram/trigram):"), - get_ngram(text_widget.value.replace("\n", "")), - pn.pane.Markdown("##Topic modeling:"), - get_ntopics(text_widget.value.replace("\n", ""), ntopics_widget.value) - ) - - -# In[8]: - - -button = pn.widgets.Button(name="Click me to run!") - - -# In[9]: - - -file_input_widget = pn.widgets.FileInput() -def update_text_widget(event): - text_widget.value = event.new.decode("utf-8") -# when the value of file_input_widget changes, -# run this function to update the text of the text widget -file_input_widget.param.watch(update_text_widget, "value"); - - -# In[10]: - - -text_widget = pn.widgets.TextAreaInput(value=sample_text, height=300, name='Add text') - - -# In[11]: - - -ntopics_widget = pn.widgets.IntSlider(name='Number of topics', start=2, end=10, step=1, value=3) - - -# In[12]: - - -interactive = pn.bind(get_text_results, button) - - -# Layout using Template -template = pn.template.FastListTemplate( - title='Simple Text Analysis', - sidebar=[ - button, - ntopics_widget, - text_widget, - "Upload a text file", - file_input_widget - ], - main=[pn.panel(interactive, loading_indicator=True)], - accent_base_color="#88d8b0", - header_background="#88d8b0", -) -template.servable() - - -# In[ ]: - - - - diff --git a/spaces/stomexserde/gpt4-ui/Examples/Flat Out 2 Crack [Starforce4] Multi5 Latest Version.md b/spaces/stomexserde/gpt4-ui/Examples/Flat Out 2 Crack [Starforce4] Multi5 Latest Version.md deleted file mode 100644 index 8e2407ff4aeddaedbcf589fc46495ed025626f27..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Flat Out 2 Crack [Starforce4] Multi5 Latest Version.md +++ /dev/null @@ -1,33 +0,0 @@ -
          -

          Flat Out 2 Crack [Starforce4] Multi5 Latest Version: How to Download and Install

          - -

          If you are looking for a racing game that combines realistic physics, destructible environments, and adrenaline-pumping action, then you might want to check out Flat Out 2. This game was released in 2006 and received positive reviews from critics and players alike. However, if you want to play it on your PC, you might encounter some issues with the Starforce4 copy protection system that prevents the game from running on some Windows versions.

          - -

          Fortunately, there is a solution for this problem: Flat Out 2 Crack [Starforce4] Multi5 Latest Version. This is a modified version of the game that bypasses the Starforce4 protection and allows you to play the game without any hassle. It also includes the Multi5 language pack that lets you choose from English, French, German, Italian, and Spanish languages.

          -

          Flat Out 2 Crack [Starforce4] Multi5 Latest Version


          DOWNLOADhttps://urlgoal.com/2uI9IO



          - -

          In this article, we will show you how to download and install Flat Out 2 Crack [Starforce4] Multi5 Latest Version on your PC. Follow these simple steps and enjoy the game:

          - -
            -
1. Download Flat Out 2 Crack [Starforce4] Multi5 Latest Version from a reliable source. You can find many links on the internet, but make sure you choose a safe and trusted one. You will need a torrent client to download the file.
2. Extract the downloaded file using a program like WinRAR or 7-Zip. You will get a folder named "Flat Out 2 Crack [Starforce4] Multi5 Latest Version".
3. Open the folder and run the setup.exe file. Follow the instructions on the screen and choose the destination folder where you want to install the game.
4. Wait for the installation to finish. It might take some time depending on your system specifications.
5. Once the installation is done, you can launch the game from the desktop shortcut or the start menu. You can also change the language settings from the options menu.
          - -

          Congratulations! You have successfully installed Flat Out 2 Crack [Starforce4] Multi5 Latest Version on your PC. Now you can enjoy the thrilling racing experience with stunning graphics and sound effects. Have fun!

          - -

          If you are wondering what makes Flat Out 2 Crack [Starforce4] Multi5 Latest Version different from the original game, here are some of the features that you can expect:

          -

          - -
            -
• A wide variety of cars and tracks to choose from. You can race in different modes such as career, arcade, time trial, and multiplayer. You can also customize your car with different parts and paint jobs.
• A realistic damage system that affects the performance and appearance of your car. You can smash your car into walls, trees, fences, and other vehicles and see the results. You can also use your car as a weapon and cause havoc on the road.
• A dynamic environment that reacts to your actions. You can destroy objects and structures on the track and create new shortcuts or obstacles. You can also trigger events such as explosions, fires, and landslides that affect the race.
• An immersive sound and music system that enhances the atmosphere of the game. You can hear the roar of the engines, the screech of the tires, and the crash of the metal. You can also listen to a variety of songs from different genres such as rock, metal, punk, and hip hop.
          - -

          Flat Out 2 Crack [Starforce4] Multi5 Latest Version is a game that will keep you entertained for hours. It is a perfect choice for fans of racing games who want to experience something new and exciting. Download it today and get ready for some flat out fun!

          -
          -
          \ No newline at end of file diff --git a/spaces/stratussox/yolov5_inference/detect.py b/spaces/stratussox/yolov5_inference/detect.py deleted file mode 100644 index 8e42fbe159d072dc1a8bf87797e2975932459268..0000000000000000000000000000000000000000 --- a/spaces/stratussox/yolov5_inference/detect.py +++ /dev/null @@ -1,258 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. - -Usage - sources: - $ python detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream - -Usage - formats: - $ python detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s_openvino_model # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s_paddle_model # PaddlePaddle -""" - -import argparse -import os -import platform -import sys -from pathlib import Path - -import torch - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) -from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import select_device, smart_inference_mode - - -@smart_inference_mode() -def run( - weights=ROOT / 'yolov5s.pt', # model path or triton URL - source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(640, 640), # inference size (height, width) - conf_thres=0.25, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_crop=False, # save cropped prediction boxes - nosave=False, # do not save images/videos - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'runs/detect', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - line_thickness=3, # bounding box thickness (pixels) - hide_labels=False, # hide labels - hide_conf=False, # hide confidences - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride -): - source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images - is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) - screenshot = source.lower().startswith('screen') - if is_url and is_file: - source = check_file(source) # download - - # Directories - save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) - stride, names, pt = model.stride, model.names, model.pt - imgsz = check_img_size(imgsz, s=stride) # check image size - - # Dataloader - bs = 1 # batch_size - if webcam: - view_img = check_imshow(warn=True) - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) - elif screenshot: - dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) - else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - vid_path, vid_writer = [None] * bs, [None] * bs - - # Run inference - model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) - for path, im, im0s, vid_cap, s in dataset: - with dt[0]: - im = torch.from_numpy(im).to(model.device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - - # Inference - with dt[1]: - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(im, augment=augment, visualize=visualize) - - # NMS - with dt[2]: - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - - # Second-stage classifier (optional) - # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) - - # Process predictions - for i, det in enumerate(pred): # per image - seen += 1 - if webcam: # batch_size >= 1 - p, im0, frame = path[i], im0s[i].copy(), dataset.count - s += f'{i}: ' - else: - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) - - p = Path(p) # to Path - save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # 
im.txt - s += '%gx%g ' % im.shape[2:] # print string - gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh - imc = im0.copy() if save_crop else im0 # for save_crop - annotator = Annotator(im0, line_width=line_thickness, example=str(names)) - if len(det): - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() - - # Print results - for c in det[:, 5].unique(): - n = (det[:, 5] == c).sum() # detections per class - s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string - - # Write results - for *xyxy, conf, cls in reversed(det): - if save_txt: # Write to file - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(f'{txt_path}.txt', 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - if save_img or save_crop or view_img: # Add bbox to image - c = int(cls) # integer class - label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') - annotator.box_label(xyxy, label, color=colors(c, True)) - if save_crop: - save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) - - # Stream results - im0 = annotator.result() - if view_img: - if platform.system() == 'Linux' and p not in windows: - windows.append(p) - cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) - cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) - cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond - - # Save results (image with detections) - if save_img: - if dataset.mode == 'image': - cv2.imwrite(save_path, im0) - else: # 'video' or 'stream' - if vid_path[i] != save_path: # new video - vid_path[i] = save_path - if isinstance(vid_writer[i], cv2.VideoWriter): - vid_writer[i].release() # release previous video writer - if vid_cap: # video - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - else: # stream - fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer[i].write(im0) - - # Print time (inference-only) - LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") - - # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) - if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: - strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--conf-thres', type=float, 
default=0.25, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') - parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--visualize', action='store_true', help='visualize features') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') - parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') - parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') - opt = parser.parse_args() - opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(vars(opt)) - return opt - - -def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/styles.py b/spaces/supertori/files/stable-diffusion-webui/modules/styles.py deleted file mode 100644 index d635c0109a1afd8867ef29b2d66ad864e1658113..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/styles.py +++ /dev/null @@ -1,87 +0,0 @@ -# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime -from __future__ import annotations - -import csv -import os -import os.path -import typing -import collections.abc as abc -import tempfile -import shutil - -if typing.TYPE_CHECKING: - # Only import this when code is being type-checked, it doesn't have any effect at runtime - from .processing import StableDiffusionProcessing - - -class PromptStyle(typing.NamedTuple): - name: str - prompt: str - negative_prompt: str - - -def merge_prompts(style_prompt: str, prompt: str) -> str: - if "{prompt}" in style_prompt: - res = style_prompt.replace("{prompt}", prompt) - else: - parts = filter(None, (prompt.strip(), style_prompt.strip())) - res = ", ".join(parts) - - return res - - -def apply_styles_to_prompt(prompt, 
styles): - for style in styles: - prompt = merge_prompts(style, prompt) - - return prompt - - -class StyleDatabase: - def __init__(self, path: str): - self.no_style = PromptStyle("None", "", "") - self.styles = {} - self.path = path - - self.reload() - - def reload(self): - self.styles.clear() - - if not os.path.exists(self.path): - return - - with open(self.path, "r", encoding="utf-8-sig", newline='') as file: - reader = csv.DictReader(file) - for row in reader: - # Support loading old CSV format with "name, text"-columns - prompt = row["prompt"] if "prompt" in row else row["text"] - negative_prompt = row.get("negative_prompt", "") - self.styles[row["name"]] = PromptStyle(row["name"], prompt, negative_prompt) - - def get_style_prompts(self, styles): - return [self.styles.get(x, self.no_style).prompt for x in styles] - - def get_negative_style_prompts(self, styles): - return [self.styles.get(x, self.no_style).negative_prompt for x in styles] - - def apply_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).prompt for x in styles]) - - def apply_negative_styles_to_prompt(self, prompt, styles): - return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) - - def save_styles(self, path: str) -> None: - # Write to temporary file first, so we don't nuke the file if something goes wrong - fd, temp_path = tempfile.mkstemp(".csv") - with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: - # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple, - # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict() - writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) - writer.writeheader() - writer.writerows(style._asdict() for k, style in self.styles.items()) - - # Always keep a backup file around - if os.path.exists(path): - shutil.move(path, path + ".bak") - shutil.move(temp_path, path) diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Facebook Hacker V.3.0 Password Download ((INSTALL)).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Facebook Hacker V.3.0 Password Download ((INSTALL)).md deleted file mode 100644 index 6a5eac3ac5a2d36419c78cfec86e5ab372323c7a..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Facebook Hacker V.3.0 Password Download ((INSTALL)).md +++ /dev/null @@ -1,6 +0,0 @@ -

          facebook hacker v.3.0 password download


          DOWNLOADhttps://cinurl.com/2uEYY3



          -
-Way 3. How to Recover FB Account via Chrome Browser — Part 3: How to Find or Recover Facebook Account Password. Way 1. Find My ...
          -
          -
          -

          diff --git a/spaces/tang155/bingo/src/lib/hooks/use-at-bottom.tsx b/spaces/tang155/bingo/src/lib/hooks/use-at-bottom.tsx deleted file mode 100644 index d37c8cf4162adcb0064e08ecec24eb731416b045..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/lib/hooks/use-at-bottom.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import * as React from 'react' - -export function useAtBottom(offset = 0) { - const [isAtBottom, setIsAtBottom] = React.useState(false) - - React.useEffect(() => { - const handleScroll = () => { - setIsAtBottom( - window.innerHeight + window.scrollY >= - document.body.offsetHeight - offset - ) - } - - window.addEventListener('scroll', handleScroll, { passive: true }) - handleScroll() - - return () => { - window.removeEventListener('scroll', handleScroll) - } - }, [offset]) - - return isAtBottom -} diff --git a/spaces/teamnassim/Fictionista/torch_utils/ops/bias_act.cpp b/spaces/teamnassim/Fictionista/torch_utils/ops/bias_act.cpp deleted file mode 100644 index 3adaeee2ae44e96655d354c2bdfb81de8ebfe6c6..0000000000000000000000000000000000000000 --- a/spaces/teamnassim/Fictionista/torch_utils/ops/bias_act.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "bias_act.h" - -//------------------------------------------------------------------------ - -static bool has_same_layout(torch::Tensor x, torch::Tensor y) -{ - if (x.dim() != y.dim()) - return false; - for (int64_t i = 0; i < x.dim(); i++) - { - if (x.size(i) != y.size(i)) - return false; - if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) - return false; - } - return true; -} - -//------------------------------------------------------------------------ - -static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp) -{ - // Validate arguments. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); - TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); - TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); - TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); - TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); - TORCH_CHECK(b.dim() == 1, "b must have rank 1"); - TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); - TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); - TORCH_CHECK(grad >= 0, "grad must be non-negative"); - - // Validate layout. 
- TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); - TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); - TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); - TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); - TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); - - // Create output tensor. - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - torch::Tensor y = torch::empty_like(x); - TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); - - // Initialize CUDA kernel parameters. - bias_act_kernel_params p; - p.x = x.data_ptr(); - p.b = (b.numel()) ? b.data_ptr() : NULL; - p.xref = (xref.numel()) ? xref.data_ptr() : NULL; - p.yref = (yref.numel()) ? yref.data_ptr() : NULL; - p.dy = (dy.numel()) ? dy.data_ptr() : NULL; - p.y = y.data_ptr(); - p.grad = grad; - p.act = act; - p.alpha = alpha; - p.gain = gain; - p.clamp = clamp; - p.sizeX = (int)x.numel(); - p.sizeB = (int)b.numel(); - p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; - - // Choose CUDA kernel. - void* kernel; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] - { - kernel = choose_bias_act_kernel(p); - }); - TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); - - // Launch CUDA kernel. - p.loopX = 4; - int blockSize = 4 * 32; - int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; - void* args[] = {&p}; - AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); - return y; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("bias_act", &bias_act); -} - -//------------------------------------------------------------------------ diff --git a/spaces/teralomaniac/clewd/clewd.js b/spaces/teralomaniac/clewd/clewd.js deleted file mode 100644 index b9d3c3c989c2564681f1db001006e23fe6ef0ac3..0000000000000000000000000000000000000000 --- a/spaces/teralomaniac/clewd/clewd.js +++ /dev/null @@ -1,863 +0,0 @@ -/* -* https://rentry.org/teralomaniac_clewd -* https://github.com/teralomaniac/clewd -*/ -'use strict'; - -const {createServer: Server, IncomingMessage, ServerResponse} = require('node:http'), {createHash: Hash, randomUUID, randomInt, randomBytes} = require('node:crypto'), {TransformStream, ReadableStream} = require('node:stream/web'), {Readable, Writable} = require('node:stream'), {Blob} = require('node:buffer'), {existsSync: exists, writeFileSync: write, createWriteStream} = require('node:fs'), {join: joinP} = require('node:path'), {ClewdSuperfetch: Superfetch, SuperfetchAvailable} = require('./lib/clewd-superfetch'), {AI, fileName, genericFixes, bytesToSize, setTitle, checkResErr, Replacements, Main} = require('./lib/clewd-utils'), ClewdStream = require('./lib/clewd-stream'); - -/******************************************************* */ -let currentIndex, Firstlogin = true, changeflag = 0, changetime = 0, totaltime, uuidOrgArray = []; - -const events = require('events'), CookieChanger = new events.EventEmitter(); -require('events').EventEmitter.defaultMaxListeners = 0; - -CookieChanger.on('ChangeCookie', () => { - Proxy && Proxy.close(); - console.log(`Changing Cookie...\n`); - Proxy.listen(Config.Port, Config.Ip, onListen); - Proxy.on('error', (err => { - console.error('Proxy error\n%o', err); - })); -}); - -const simpletokenizer = (prompt) 
=> { - let byteLength = 0; - for (let i = 0; i < prompt.length; i++) { - let code = prompt.charCodeAt(i); - if (code <= 0xFF) { - byteLength += 0.6; - } else if (code <= 0xFFFF) { - byteLength += 1; - } else { - byteLength += 1.5; - } - } - return byteLength; -}, padtxt = (content) => { - if (Config.padtxt_placeholder.length > 0) { - var placeholder = Config.padtxt_placeholder; - } else { - const bytes = randomInt(5, 15); - var placeholder = randomBytes(bytes).toString('hex'); - } - let count = Math.floor((Config.Settings.padtxt - simpletokenizer(content)) / simpletokenizer(placeholder)); - - // 生成占位符字符串 - let padding = ''; - for (let i = 0; i < count; i++) { - padding += placeholder; - } - - // 在prompt前面添加占位符, 在末尾增加空行然后添加prompt - content = padding + '\n\n\n' + content; - - return content.trim(); -}, xmlPlot = (content) => { - // 检查内容中是否包含"" - if (!content.includes('')) { - content = content.replace(/(\n\n|^)xmlPlot:\s*/gm, '$1'); - content = content.replace(/(\n|\n<\/reply>)/g, ''); - return content.replace(/(.*?)<\/customname>/gm, '$1'); - } - - //群组 - content = content.replace(/(\n|\n<\/reply>)\1*/g, '$1'); - content = content.replace(/(.*?)<\/customname>:/gm, '$1:\n'); - - //role合并 - if (!content.includes('<\!-- Merge Disable -->')) { - if (!content.includes('<\!-- Merge Human Disable -->')) { - content = content.replace(/(\n\n|^)xmlPlot:/g, '$1Human:'); - content = content.replace(/(?:\n\n|^)Human:(.*?(?:\n\nAssistant:|$))/gs, function(match, p1) {return '\n\nHuman:' + p1.replace(/\n\nHuman:\s*/g, '\n\n')}); - content = content.replace(/^\s*Human:\s*/, ''); - } - if (!content.includes('<\!-- Merge Assistant Disable -->')) { - content = content.replace(/\n\nAssistant:(.*?(?:\n\nHuman:|$))/gs, function(match, p1) {return '\n\nAssistant:' + p1.replace(/\n\nAssistant:\s*/g, '\n\n')}); - } - } - content = content.replace(/(\n\n|^)xmlPlot:\s*/gm, '$1'); - content = content.replace(/<\!-- Merge.*?Disable -->/gm, ''); - - //格式顺序交换&越狱倒置 - content = content.replace(/.*?<\/Prev\1>/gs, function(match) {return match.replace(/\n\n(Assistant|Human):/g, '\n\ntemp$1:')}); - let segcontentAssistant = content.split('\n\nAssistant:'); - let processedsegAssistant = segcontentAssistant.map(seg => { - return seg.replace(/(\n\nHuman:.*?)(.*?)<\/PrevAssistant>/gs, '\n\n$2$1'); - }); - content = processedsegAssistant.join('\n\nAssistant:'); - let segcontentHuman = content.split('\n\nHuman:'); - const seglength = segcontentHuman.length; - for (let i = 1; i < seglength; i++) { - const match = segcontentHuman[i].match(/.*?<\/PrevHuman>/s); - if (match) { - segcontentHuman[i - 1] += match[0].replace(/(.*?)<\/PrevHuman>/s, '\n\n$1'); - segcontentHuman[i] = segcontentHuman[i].replace(match[0], ''); - } - } - if (/Assistant: *.$/.test(content) && seglength > 1 && !segcontentHuman[seglength - 2].includes('\n\nAssistant:')) { - segcontentHuman[seglength - 2] = segcontentHuman.splice(seglength - 1, 1, segcontentHuman[seglength - 2])[0]; - } - content = segcontentHuman.join('\n\nHuman:'); - content = content.replace(/\n\ntemp(Assistant|Human):/g, '\n\n$1:'); - - //给开头加上用于截断附加文件标识 - content.includes('') && (content = '\n\n' + content); - - // 在第一个"[Start a new"前面加上"",在最后一个"[Start a new"前面加上"\n\n\n\n" - const exampleNote = content.match(/(?<=).*(?=<\/example-note>)/) || ''; - const cardtag = content.match(/(?=\n\n<\/card>)/) || ''; - const exampletag = content.match(/(?=\n\n<\/example>)/) || ''; - const plot = content.includes('') ? 
'' : ''; - content = content.replace(/.*<\/example-note>/, ''); - const firstChatStart = content.indexOf('\n\n[Start a new'); - const lastChatStart = content.lastIndexOf('\n\n[Start a new'); - firstChatStart != -1 && firstChatStart === lastChatStart && (content = content.slice(0, firstChatStart) + `\n\n${cardtag}` + content.slice(firstChatStart)); - firstChatStart != lastChatStart && (content = content.slice(0, firstChatStart) + `\n\n${cardtag}\n\n${exampleNote}\n` + content.slice(firstChatStart, lastChatStart) + `\n\n${exampletag}\n\n${plot}` + content.slice(lastChatStart)); - - //Plain Prompt - segcontentHuman = content.split('\n\nHuman:'); - let segcontentlastIndex = segcontentHuman.length - 1; - if (segcontentlastIndex >= 2 && segcontentHuman[segcontentlastIndex].includes('') && !content.includes('\n\nPlainPrompt:')) { - content = segcontentHuman.slice(0, segcontentlastIndex).join('\n\nHuman:') + '\n\nPlainPrompt:' + segcontentHuman.slice(segcontentlastIndex).join('\n\nHuman:'); - } - content = content.replace(/<\!-- Plain Prompt Enable -->/, ''); - content = content.replace(/\n\nHuman:.*PlainPrompt:/, '\n\nPlainPrompt:'); - - //消除空XML tags或多余的\n - content = content.replace(/\n<\/(hidden|META)>\s+?<\1>\n/g, ''); - content = content.replace(/\n<(card|example|hidden|plot|META)>\s+?<\1>/g, '\n<$1>'); - content = content.replace(/(?:)?\n<(card|example|hidden|plot|META)>\s+?<\/\1>/g, ''); - content = content.replace(/(?<=(: |\n)<(card|hidden|example|plot|META)>\n)\s*/g, ''); - content = content.replace(/\s*(?=\n<\/(card|hidden|example|plot|META)>(\n|$))/g, ''); - content = content.replace(/(?<=\n)\n(?=\n)/g, ''); - - return content.trim(); -}; -/******************************************************* */ - -let ChangedSettings, UnknownSettings, Logger; - -const ConfigPath = joinP(__dirname, './config.js'), LogPath = joinP(__dirname, './log.txt'), Conversation = { - char: null, - uuid: null, - depth: 0 -}, cookies = {}; - -let uuidOrg, curPrompt = {}, prevPrompt = {}, prevMessages = [], prevImpersonated = false, Config = { - Cookie: '', - CookieArray: [], - Cookiecounter: 0, - CookieIndex: 0, - Ip: (process.env.Cookie || process.env.CookieArray) ? '0.0.0.0' : '127.0.0.1', - Port: process.env.PORT || 8444, - localtunnel: false, - BufferSize: 1, - SystemInterval: 3, - rProxy: AI.end(), - padtxt_placeholder: '', - PromptExperimentFirst: '', - PromptExperimentNext: '', - PersonalityFormat: '{{char}}\'s personality: {{personality}}', - ScenarioFormat: 'Dialogue scenario: {{scenario}}', - Settings: { - RenewAlways: true, - RetryRegenerate: false, - PromptExperiments: true, - SystemExperiments: true, - PreventImperson: false, - AllSamples: false, - NoSamples: false, - StripAssistant: false, - StripHuman: false, - PassParams: false, - ClearFlags: true, - PreserveChats: true, - LogMessages: true, - FullColon: true, - padtxt: 13500, - xmlPlot: true, - Superfetch: true - } -}; - -ServerResponse.prototype.json = async function(body, statusCode = 200, headers) { - body = body instanceof Promise ? await body : body; - this.headersSent || this.writeHead(statusCode, { - 'Content-Type': 'application/json', - ...headers && headers - }); - this.end('object' == typeof body ? JSON.stringify(body) : body); - return this; -}; - -Array.prototype.sample = function() { - return this[Math.floor(Math.random() * this.length)]; -}; - -const updateParams = res => { - updateCookies(res); -}, updateCookies = res => { - let cookieNew = ''; - res instanceof Response ? 
cookieNew = res.headers?.get('set-cookie') : res?.superfetch ? cookieNew = res.headers?.['set-cookie'] : 'string' == typeof res && (cookieNew = res.split('\n').join('')); - if (!cookieNew) { - return; - } - let cookieArr = cookieNew.split(/;\s?/gi).filter((prop => false === /^(path|expires|domain|HttpOnly|Secure|SameSite)[=;]*/i.test(prop))); - for (const cookie of cookieArr) { - const divide = cookie.split(/^(.*?)=\s*(.*)/), cookieName = divide[1], cookieVal = divide[2]; - cookies[cookieName] = cookieVal; - } -}, getCookies = () => { - const cookieNames = Object.keys(cookies); - return cookieNames.map(((name, idx) => `${name}=${cookies[name]}${idx === cookieNames.length - 1 ? '' : ';'}`)).join(' ').replace(/(\s+)$/gi, ''); -}, deleteChat = async uuid => { - if (!uuid) { - return; - } - if (uuid === Conversation.uuid) { - Conversation.uuid = null; - Conversation.depth = 0; - } - if (Config.Settings.PreserveChats) { - return; - } - const res = await fetch(`${Config.rProxy}/api/organizations/${uuidOrg}/chat_conversations/${uuid}`, { - headers: { - ...AI.hdr(), - Cookie: getCookies() - }, - method: 'DELETE' - }); - updateParams(res); -}, onListen = async () => { -/***************************** */ - if (Firstlogin) { - Firstlogin = false; - console.log(`${Main}\nhttp://${Config.Ip}:${Config.Port}/v1\n\n${Object.keys(Config.Settings).map((setting => UnknownSettings.includes(setting) ? `??? ${setting}: ${Config.Settings[setting]}` : `${setting}: ${ChangedSettings.includes(setting) ? '' : ''}${Config.Settings[setting]}`)).sort().join('\n')}\n`); - Config.Settings.Superfetch && SuperfetchAvailable(true); - if (Config.localtunnel) { - const localtunnel = require('localtunnel'); - localtunnel({ port: Config.Port }) - .then((tunnel) => { - console.log(`\nTunnel URL for outer websites: ${tunnel.url}/v1\n`); - }) - } - totaltime = Config.CookieArray.length; - } - if (Config.CookieArray?.length > 0) { - Config.Cookie = Config.CookieArray[currentIndex]; - currentIndex = (currentIndex + 1) % Config.CookieArray.length; - changetime += 1; - } - let percentage = ((changetime + Config.CookieIndex) / totaltime) * 100 - if (Config.Cookiecounter < 0 && percentage > 100) { - console.log(`\n※※※Cookie cleanup completed※※※\n\n`); - return process.exit(); - } -/***************************** */ - if ('SET YOUR COOKIE HERE' === Config.Cookie || Config.Cookie?.length < 1) { - throw Error('Set your cookie inside config.js'); - } - updateCookies(Config.Cookie); - //console.log(`${Main}\nhttp://${Config.Ip}:${Config.Port}/v1\n\n${Object.keys(Config.Settings).map((setting => UnknownSettings.includes(setting) ? `??? ${setting}: ${Config.Settings[setting]}` : `${setting}: ${ChangedSettings.includes(setting) ? 
'' : ''}${Config.Settings[setting]}`)).sort().join('\n')}\n`); - //Config.Settings.Superfetch && SuperfetchAvailable(true); - const accRes = await fetch(Config.rProxy + '/api/organizations', { - method: 'GET', - headers: { - ...AI.hdr(), - Cookie: getCookies() - } - }); -/**************************** */ - if (accRes.statusText === 'Forbidden' && Config.CookieArray?.length > 0) { - Config.CookieArray = Config.CookieArray.filter(item => item !== Config.Cookie); - !process.env.Cookie && !process.env.CookieArray && writeSettings(Config); - currentIndex = (currentIndex - 1 + Config.CookieArray.length) % Config.CookieArray.length; - console.log(`Expired!`); - Config.Cookiecounter < 0 && console.log(`[progress]: ${percentage.toFixed(2)}%\n[length]: ${Config.CookieArray.length}\n`); - CookieChanger.emit('ChangeCookie'); - return; - } -/**************************** */ - await checkResErr(accRes); - const accInfo = (await accRes.json())?.[0]; - if (!accInfo || accInfo.error) { - throw Error(`Couldn't get account info: "${accInfo?.error?.message || accRes.statusText}"`); - } - if (!accInfo?.uuid) { - throw Error('Invalid account id'); - } - setTitle('ok'); - updateParams(accRes); - console.log(Config.CookieArray?.length > 0 ? `(index: ${currentIndex || Config.CookieArray.length}) Logged in %o` : 'Logged in %o', { //console.log('Logged in %o', { - name: accInfo.name?.split('@')?.[0], - capabilities: accInfo.capabilities, - }); - uuidOrg = accInfo?.uuid; -/************************* */ - if (uuidOrgArray.includes(uuidOrg)) { - console.log(`Overlap!`); - currentIndex = (currentIndex - 1 + Config.CookieArray.length) % Config.CookieArray.length; - Config.Cookiecounter < 0 && console.log(`[progress]: ${percentage.toFixed(2)}%\n[length]: ${Config.CookieArray.length}\n`); - Config.CookieArray = Config.CookieArray.filter(item => item !== Config.Cookie); - !process.env.Cookie && !process.env.CookieArray && writeSettings(Config); - CookieChanger.emit('ChangeCookie'); - return; - } else { - uuidOrgArray.push(uuidOrg); - } -/************************* */ - if (accInfo?.active_flags.length > 0) { - const now = new Date, formattedFlags = accInfo.active_flags.map((flag => { - const days = ((new Date(flag.expires_at).getTime() - now.getTime()) / 864e5).toFixed(2); - return { - type: flag.type, - remaining_days: days - }; - })); - console.warn('Your account has warnings %o', formattedFlags); //console.warn('Your account has warnings %o', formattedFlags); - await Promise.all(accInfo.active_flags.map((flag => (async type => { - if (!Config.Settings.ClearFlags) { - return; - } - if ('consumer_restricted_mode' === type) { - return; - } - const req = await (Config.Settings.Superfetch ? Superfetch : fetch)(`${Config.rProxy}/api/organizations/${uuidOrg}/flags/${type}/dismiss`, { - headers: { - ...AI.hdr(), - Cookie: getCookies() - }, - method: 'POST' - }); - updateParams(req); - const json = await req.json(); - console.log(`${type}: ${json.error ? 
json.error.message || json.error.type || json.detail : 'OK'}`); - })(flag.type)))); -/***************************** */ - if (Config.CookieArray?.length > 0) { - console.log(`Restricted!`); - Config.Cookiecounter < 0 && console.log(`[progress]: ${percentage.toFixed(2)}%\n[length]: ${Config.CookieArray.length}\n`); - CookieChanger.emit('ChangeCookie'); - return; - } - } - if (Config.CookieArray.length > 0) { - const allres = await fetch(`${Config.rProxy}`, { - headers: { - ...AI.hdr(), - Cookie: getCookies() - }, - method: 'GET' - }), accountinfo = await allres.text(); - updateParams(allres); - const Unverified = accountinfo.includes('\\"completed_verification_at\\":null'); - const Banned = accountinfo.includes('\\"gate\":\\"segment:abuse\\",\\"gateValue\\":\\"true\\",'); - const Exceededlimit = /\\"messageLimit\\":{\\"type\\":\\"(approaching_limit\\",\\"remaining\\":0|exceeded_limit)\\",/.test(accountinfo); - const Remain = /\\"messageLimit\\":{\\"type\\":\\"approaching_limit\\",\\"remaining\\":\d+\\",/.exec(accountinfo); - Remain && (changeflag = Math.max(Config.Cookiecounter - Remain[0], changeflag)); - if (Unverified || Banned) { - Config.CookieArray = Config.CookieArray.filter(item => item !== Config.Cookie); - !process.env.Cookie && !process.env.CookieArray && writeSettings(Config); - currentIndex = (currentIndex - 1 + Config.CookieArray.length) % Config.CookieArray.length; - } - Unverified && console.log(`Unverified!`); - Banned && console.log(`Banned!`); - Exceededlimit && console.log(`Exceeded limit!`); - Config.Cookiecounter < 0 && console.log(`[progress]: ${percentage.toFixed(2)}%\n[length]: ${Config.CookieArray.length}`); - if (Unverified || Banned || Exceededlimit || Config.Cookiecounter < 0) { - console.log(''); - CookieChanger.emit('ChangeCookie'); - return; - } -/***************************** */ - } - const convRes = await fetch(`${Config.rProxy}/api/organizations/${uuidOrg}/chat_conversations`, { - method: 'GET', - headers: { - ...AI.hdr(), - Cookie: getCookies() - } - }), conversations = await convRes.json(); - updateParams(convRes); - conversations.length > 0 && await Promise.all(conversations.map((conv => deleteChat(conv.uuid)))); -}, writeSettings = async (config, firstRun = false) => { - write(ConfigPath, `/*\n* https://rentry.org/teralomaniac_clewd\n* https://github.com/teralomaniac/clewd\n*/\n\n// SET YOUR COOKIE BELOW\n\nmodule.exports = ${JSON.stringify(config, null, 4)}\n\n/*\n BufferSize\n * How many characters will be buffered before the AI types once\n * lower = less chance of \`PreventImperson\` working properly\n\n ---\n\n SystemInterval\n * How many messages until \`SystemExperiments alternates\`\n\n ---\n\n Other settings\n * https://gitgud.io/ahsk/clewd/#defaults\n * and\n * https://gitgud.io/ahsk/clewd/-/blob/master/CHANGELOG.md\n */`.trim().replace(/((? 
{ - if ('OPTIONS' === req.method) { - return ((req, res) => { - res.writeHead(200, { - 'Access-Control-Allow-Origin': '*', - 'Access-Control-Allow-Headers': 'Authorization, Content-Type', - 'Access-Control-Allow-Methods': 'POST, GET, OPTIONS' - }).end(); - })(0, res); - } - switch (req.url) { - case '/v1/models': - res.json({ - data: [ { - id: AI.mdl() - } ] - }); - break; - - case '/v1/chat/completions': - ((req, res) => { - setTitle('recv...'); - let fetchAPI, changer; //let fetchAPI; - const abortControl = new AbortController, {signal} = abortControl; - res.socket.on('close', (async () => { - abortControl.signal.aborted || abortControl.abort(); - })); - const buffer = []; - req.on('data', (chunk => { - buffer.push(chunk); - })); - req.on('end', (async () => { - let clewdStream, titleTimer, samePrompt = false, shouldRenew = true, retryRegen = false; - try { - const body = JSON.parse(Buffer.concat(buffer).toString()), temperature = Math.max(.1, Math.min(1, body.temperature)); - let {messages} = body; - if (messages?.length < 1) { - throw Error('Select OpenAI as completion source'); - } - if (!body.stream && 1 === messages.length && JSON.stringify(messages.sort() || []) === JSON.stringify([ { - role: 'user', - content: 'Hi' - } ].sort())) { - return res.json({ - choices: [ { - message: { - content: Main - } - } ] - }); - } - res.setHeader('Access-Control-Allow-Origin', '*'); - body.stream && res.setHeader('Content-Type', 'text/event-stream'); - if (!body.stream && messages?.[0]?.content?.startsWith('From the list below, choose a word that best represents a character\'s outfit description, action, or emotion in their dialogue')) { - return res.json({ - choices: [ { - message: { - content: 'neutral' - } - } ] - }); - } - if (Config.Settings.AllSamples && Config.Settings.NoSamples) { - console.log('having AllSamples and NoSamples both set to true is not supported'); - throw Error('Only one can be used at the same time: AllSamples/NoSamples'); - } - const model = AI.mdl(); - curPrompt = { - firstUser: messages.find((message => 'user' === message.role)), - firstSystem: messages.find((message => 'system' === message.role)), - firstAssistant: messages.find((message => 'assistant' === message.role)), - lastUser: messages.findLast((message => 'user' === message.role)), - lastSystem: messages.findLast((message => 'system' === message.role && '[Start a new chat]' !== message.content)), - lastAssistant: messages.findLast((message => 'assistant' === message.role)) - }; - prevPrompt = { - ...prevMessages.length > 0 && { - firstUser: prevMessages.find((message => 'user' === message.role)), - firstSystem: prevMessages.find((message => 'system' === message.role)), - firstAssistant: prevMessages.find((message => 'assistant' === message.role)), - lastUser: prevMessages.findLast((message => 'user' === message.role)), - lastSystem: prevMessages.find((message => 'system' === message.role && '[Start a new chat]' !== message.content)), - lastAssistant: prevMessages.findLast((message => 'assistant' === message.role)) - } - }; - samePrompt = JSON.stringify(messages.filter((message => 'system' !== message.role)).sort()) === JSON.stringify(prevMessages.filter((message => 'system' !== message.role)).sort()); - const sameCharDiffChat = !samePrompt && curPrompt.firstSystem?.content === prevPrompt.firstSystem?.content && curPrompt.firstUser?.content !== prevPrompt.firstUser?.content; - shouldRenew = Config.Settings.RenewAlways || !Conversation.uuid || prevImpersonated || !Config.Settings.RenewAlways && samePrompt || 
sameCharDiffChat; - retryRegen = Config.Settings.RetryRegenerate && samePrompt && null != Conversation.uuid; - samePrompt || (prevMessages = JSON.parse(JSON.stringify(messages))); - let type = ''; - if (retryRegen) { - type = 'R'; - fetchAPI = await (async (signal, model) => { - let res; - const body = { - completion: { - prompt: '', - timezone: AI.zone(), - model: model || AI.mdl() - }, - organization_uuid: uuidOrg, - conversation_uuid: Conversation.uuid, - text: '' - }; - let headers = { - ...AI.hdr(Conversation.uuid || ''), - Accept: 'text/event-stream', - Cookie: getCookies() - }; - if (Config.Settings.Superfetch) { - const names = Object.keys(headers), values = Object.values(headers); - headers = names.map(((header, idx) => `${header}: ${values[idx]}`)); - } - res = await (Config.Settings.Superfetch ? Superfetch : fetch)(Config.rProxy + '/api/retry_message', { - stream: true, - signal, - method: 'POST', - body: JSON.stringify(body), - headers - }); - updateParams(res); - await checkResErr(res); - return res; - })(signal, model); - } else if (shouldRenew) { - Conversation.uuid && await deleteChat(Conversation.uuid); - fetchAPI = await (async signal => { - Conversation.uuid = randomUUID().toString(); - Conversation.depth = 0; - const res = await (Config.Settings.Superfetch ? Superfetch : fetch)(`${Config.rProxy}/api/organizations/${uuidOrg}/chat_conversations`, { - signal, - headers: { - ...AI.hdr(), - Cookie: getCookies() - }, - method: 'POST', - body: JSON.stringify({ - uuid: Conversation.uuid, - name: '' - }) - }); - updateParams(res); - await checkResErr(res); - return res; - })(signal); - type = 'r'; - } else if (samePrompt) {} else { - const systemExperiment = !Config.Settings.RenewAlways && Config.Settings.SystemExperiments; - if (!systemExperiment || systemExperiment && Conversation.depth >= Config.SystemInterval) { - type = 'c-r'; - Conversation.depth = 0; - } else { - type = 'c-c'; - Conversation.depth++; - } - } - let {prompt, systems} = ((messages, type) => { - const rgxScenario = /^\[Circumstances and context of the dialogue: ([\s\S]+?)\.?\]$/i, rgxPerson = /^\[([\s\S]+?)'s personality: ([\s\S]+?)\]$/i, messagesClone = JSON.parse(JSON.stringify(messages)), realLogs = messagesClone.filter((message => [ 'user', 'assistant' ].includes(message.role))), sampleLogs = messagesClone.filter((message => message.name)), mergedLogs = [ ...sampleLogs, ...realLogs ]; - mergedLogs.forEach(((message, idx) => { - const next = mergedLogs[idx + 1]; - message.customname = (message => [ 'assistant', 'user' ].includes(message.role) && null != message.name && !(message.name in Replacements))(message); - if (next && !Config.Settings.xmlPlot) { //if (next) { - if ('name' in message && 'name' in next) { - if (message.name === next.name) { - message.content += '\n\n' + next.content; //message.content += '\n' + next.content; - next.merged = true; - } - } else if ('system' !== next.role) { - if (next.role === message.role) { - message.content += '\n\n' + next.content; //message.content += '\n' + next.content; - next.merged = true; - } - } else { - message.content += '\n\n' + next.content; //message.content += '\n' + next.content; - next.merged = true; - } - } - })); - const lastAssistant = realLogs.findLast((message => !message.merged && 'assistant' === message.role)); - lastAssistant && Config.Settings.StripAssistant && (lastAssistant.strip = true); - const lastUser = realLogs.findLast((message => !message.merged && 'user' === message.role)); - lastUser && Config.Settings.StripHuman && (lastUser.strip 
= true); - const systemMessages = messagesClone.filter((message => 'system' === message.role && !('name' in message))); - systemMessages.forEach(((message, idx) => { - const scenario = message.content.match(rgxScenario)?.[1], personality = message.content.match(rgxPerson); - if (scenario) { - message.content = Config.ScenarioFormat.replace(/{{scenario}}/gim, scenario); - message.scenario = true; - } - if (3 === personality?.length) { - message.content = Config.PersonalityFormat.replace(/{{char}}/gim, personality[1]).replace(/{{personality}}/gim, personality[2]); - message.personality = true; - } - message.main = 0 === idx; - message.jailbreak = idx === systemMessages.length - 1; - ' ' === message.content && (message.discard = true); - })); - Config.Settings.AllSamples && !Config.Settings.NoSamples && realLogs.forEach((message => { - if (![ lastUser, lastAssistant ].includes(message)) { - if ('user' === message.role) { - message.name = message.customname ? message.name : 'example_user'; - message.role = 'system'; - } else if ('assistant' === message.role) { - message.name = message.customname ? message.name : 'example_assistant'; - message.role = 'system'; - } else if (!message.customname) { - throw Error('Invalid role ' + message.name); - } - } - })); - Config.Settings.NoSamples && !Config.Settings.AllSamples && sampleLogs.forEach((message => { - if ('example_user' === message.name) { - message.role = 'user'; - } else if ('example_assistant' === message.name) { - message.role = 'assistant'; - } else if (!message.customname) { - throw Error('Invalid role ' + message.name); - } - message.customname || delete message.name; - })); - let systems = []; - if (![ 'r', 'R' ].includes(type)) { - lastUser.strip = true; - systemMessages.forEach((message => message.discard = message.discard || 'c-c' === type ? !message.jailbreak : !message.jailbreak && !message.main)); - systems = systemMessages.filter((message => !message.discard)).map((message => `"${message.content.substring(0, 25).replace(/\n/g, '\\n').trim()}..."`)); - messagesClone.forEach((message => message.discard = message.discard || mergedLogs.includes(message) && ![ lastUser ].includes(message))); - } - const prompt = messagesClone.map(((message, idx) => { - if (message.merged || message.discard) { - return ''; - } - if (message.content.length < 1) { - return message.content; - } - let spacing = ''; -/****************************************************************/ - if (Config.Settings.xmlPlot) { - idx > 0 && (spacing = '\n\n'); - const prefix = message.customname ? message.role + ': ' + message.name + ': ' : 'system' !== message.role || message.name ? Replacements[message.name || message.role] + ': ' : 'xmlPlot: ' + Replacements[message.role]; - return `${spacing}${prefix}${message.customname ? '\n' + message.content.trim() + '\n' : message.content}`; - } else { -/****************************************************************/ - idx > 0 && (spacing = systemMessages.includes(message) ? '\n' : '\n\n'); - const prefix = message.customname ? message.name + ': ' : 'system' !== message.role || message.name ? Replacements[message.name || message.role] + ': ' : '' + Replacements[message.role]; - return `${spacing}${message.strip ? '' : prefix}${'system' === message.role ? message.content : message.content.trim()}`; - } // - })); - return { - prompt: genericFixes(prompt.join('')).trim(), - systems - }; - })(messages, type); - console.log(`${model} [${type}]${!retryRegen && systems.length > 0 ? 
' ' + systems.join(' / ') : ''}`); - 'R' !== type || prompt || (prompt = '...regen...'); -/****************************************************************/ - Config.Settings.xmlPlot && (prompt = xmlPlot(prompt)); - Config.Settings.FullColon && (prompt = prompt.replace(/(?<=\n\n(H(?:uman)?|A(?:ssistant)?)):[ ]?/g, ': ')); - Config.Settings.padtxt && (prompt = padtxt(prompt)); -/****************************************************************/ - Logger?.write(`\n\n-------\n[${(new Date).toLocaleString()}]\n####### PROMPT (${type}):\n${prompt}\n--\n####### REPLY:\n`); - retryRegen || (fetchAPI = await (async (signal, model, prompt, temperature, type) => { - const attachments = []; - if (Config.Settings.PromptExperiments) { -/****************************************************************/ - let splitedprompt = prompt.split('\n\nPlainPrompt:'); - prompt = splitedprompt[0]; -/****************************************************************/ - attachments.push({ - extracted_content: (prompt), - file_name: 'paste.txt', //fileName(), - file_size: Buffer.from(prompt).byteLength, - file_type: 'txt' //'text/plain' - }); - prompt = 'r' === type ? Config.PromptExperimentFirst : Config.PromptExperimentNext; -/****************************************************************/ - splitedprompt.length > 1 && (prompt = prompt + splitedprompt[1]); -/****************************************************************/ - } - let res; - const body = { - completion: { - ...Config.Settings.PassParams && { - temperature - }, - prompt: prompt || '', - timezone: AI.zone(), - model: model || AI.mdl() - }, - organization_uuid: uuidOrg, - conversation_uuid: Conversation.uuid, - text: prompt, - attachments - }; - let headers = { - ...AI.hdr(Conversation.uuid || ''), - Accept: 'text/event-stream', - Cookie: getCookies() - }; - res = await (Config.Settings.Superfetch ? Superfetch : fetch)(Config.rProxy + '/api/append_message', { - stream: true, - signal, - method: 'POST', - body: JSON.stringify(body), - headers - }); - updateParams(res); - await checkResErr(res); - return res; - })(signal, model, prompt, temperature, type)); - const response = Writable.toWeb(res); - clewdStream = new ClewdStream({ - config: Config, - version: Main, - minSize: Config.BufferSize, - model, - streaming: body.stream, - abortControl, - source: fetchAPI - }, Logger); - titleTimer = setInterval((() => setTitle('recv ' + bytesToSize(clewdStream.size))), 300); - Config.Settings.Superfetch ? await Readable.toWeb(fetchAPI.body).pipeThrough(clewdStream).pipeTo(response) : await fetchAPI.body.pipeThrough(clewdStream).pipeTo(response); - } catch (err) { - if ('AbortError' === err.name) { - res.end(); - } else { - err.planned || console.error('Clewd:\n%o', err); - res.json({ - error: { - message: 'clewd: ' + (err.message || err.name || err.type), - type: err.type || err.name || err.code, - param: null, - code: err.code || 500 - } - }); - } - } - clearInterval(titleTimer); - if (clewdStream) { - clewdStream.censored && console.warn('likely your account is hard-censored'); - prevImpersonated = clewdStream.impersonated; - setTitle('ok ' + bytesToSize(clewdStream.size)); - //console.log(`${200 == fetchAPI.status ? '' : ''}${fetchAPI.status}!\n`); -/******************************** */ - 429 == fetchAPI.status ? console.log(`Exceeded limit!\n`) : console.log(`${200 == fetchAPI.status ? 
'' : ''}${fetchAPI.status}!\n`); - changeflag += 1; - if (Config.CookieArray?.length > 0 && (429 == fetchAPI.status || (Config.Cookiecounter && changeflag >= Config.Cookiecounter))) { - changeflag = 0; - changer = true; - } -/******************************** */ - clewdStream.empty(); - } - if (prevImpersonated) { - try { - await deleteChat(Conversation.uuid); - } catch (err) {} - } -/******************************** */ - changer && CookieChanger.emit('ChangeCookie'); -/******************************** */ - })); - })(req, res); - break; - - case '/v1/complete': - res.json({ - error: { - message: 'clewd: Set "Chat Completion" to OpenAI instead of Claude. Enable "External" models aswell' - } - }); - break; - - default: - req.url !== '/' && (console.log('unknown request: ' + req.url)); //console.log('unknown request: ' + req.url); - res.json({ - error: { - message: '404 Not Found', - type: 404, - param: null, - code: 404 - } - }, 200); - } -})); - -!async function() { - await (async () => { - if (exists(ConfigPath)) { - const userConfig = require(ConfigPath), validConfigs = Object.keys(Config), parsedConfigs = Object.keys(userConfig), parsedSettings = Object.keys(userConfig.Settings), invalidConfigs = parsedConfigs.filter((config => !validConfigs.includes(config))), validSettings = Object.keys(Config.Settings); - UnknownSettings = parsedSettings.filter((setting => !validSettings.includes(setting))); - invalidConfigs.forEach((config => { - console.warn(`unknown config in config.js: ${config}`); - })); - UnknownSettings.forEach((setting => { - console.warn(`unknown setting in config.js: Settings.${setting}`); - })); - const missingConfigs = validConfigs.filter((config => !parsedConfigs.includes(config))), missingSettings = validSettings.filter((config => !parsedSettings.includes(config))); - missingConfigs.forEach((config => { - console.warn(`adding missing config in config.js: ${config}`); - userConfig[config] = Config[config]; - })); - missingSettings.forEach((setting => { - console.warn(`adding missing setting in config.js: Settings.${setting}`); - userConfig.Settings[setting] = Config.Settings[setting]; - })); - ChangedSettings = parsedSettings.filter((setting => Config.Settings[setting] !== userConfig.Settings[setting])); - (missingConfigs.length > 0 || missingSettings.length > 0) && await writeSettings(userConfig); - userConfig.Settings.LogMessages && (Logger = createWriteStream(LogPath)); - Config = { - ...Config, - ...userConfig - }; - } else { - Config.Cookie = 'SET YOUR COOKIE HERE'; - writeSettings(Config, true); - } -/***************************** */ - function convertToType(value) { - if (value === "true") return true; - if (value === "false") return false; - if (/^\d+$/.test(value)) return parseInt(value); - return value; - } - for (let key in Config) { - if (key === 'Settings') { - for (let setting in Config.Settings) { - Config.Settings[setting] = convertToType(process.env[setting]) ?? Config.Settings[setting]; - } - } else { - Config[key] = key === 'CookieArray' ? (process.env[key]?.split(',')?.map(x => x.replace(/[\[\]"\s]/g, '')) ?? Config[key]) : (convertToType(process.env[key]) ?? 
Config[key]); - } - } -/***************************** */ - })(); -/***************************** */ - !Config.rProxy && (Config.rProxy = AI.end()); - Config.rProxy.endsWith('/') && (Config.rProxy = Config.rProxy.slice(0, -1)); - let uniqueArr = [], seen = new Set(); - for (let Cookie of Config.CookieArray) { - if (!seen.has(Cookie)) { - uniqueArr.push(Cookie); - seen.add(Cookie); - } - } - Config.CookieArray = uniqueArr; - !process.env.Cookie && !process.env.CookieArray && writeSettings(Config); - currentIndex = Config.CookieIndex > 0 ? Config.CookieIndex - 1 : Config.Cookiecounter >= 0 ? Math.floor(Math.random()*Config.CookieArray.length) : 0; -/***************************** */ - Proxy.listen(Config.Port, Config.Ip, onListen); - Proxy.on('error', (err => { - console.error('Proxy error\n%o', err); - })); -}(); - -const cleanup = async () => { - console.log('cleaning...'); - try { - await deleteChat(Conversation.uuid); - Logger?.close(); - } catch (err) {} - process.exit(); -}; - -process.on('SIGHUP', cleanup); - -process.on('SIGTERM', cleanup); - -process.on('SIGINT', cleanup); - -process.on('exit', (async () => { - console.log('exiting...'); -})); diff --git a/spaces/theonerichy/wd-v1-4-tags/Utils/dbimutils.py b/spaces/theonerichy/wd-v1-4-tags/Utils/dbimutils.py deleted file mode 100644 index e01496710f8905e542dbe7e89c91fd2c8d1bc14a..0000000000000000000000000000000000000000 --- a/spaces/theonerichy/wd-v1-4-tags/Utils/dbimutils.py +++ /dev/null @@ -1,54 +0,0 @@ -# DanBooru IMage Utility functions - -import cv2 -import numpy as np -from PIL import Image - - -def smart_imread(img, flag=cv2.IMREAD_UNCHANGED): - if img.endswith(".gif"): - img = Image.open(img) - img = img.convert("RGB") - img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) - else: - img = cv2.imread(img, flag) - return img - - -def smart_24bit(img): - if img.dtype is np.dtype(np.uint16): - img = (img / 257).astype(np.uint8) - - if len(img.shape) == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - elif img.shape[2] == 4: - trans_mask = img[:, :, 3] == 0 - img[trans_mask] = [255, 255, 255, 255] - img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) - return img - - -def make_square(img, target_size): - old_size = img.shape[:2] - desired_size = max(old_size) - desired_size = max(desired_size, target_size) - - delta_w = desired_size - old_size[1] - delta_h = desired_size - old_size[0] - top, bottom = delta_h // 2, delta_h - (delta_h // 2) - left, right = delta_w // 2, delta_w - (delta_w // 2) - - color = [255, 255, 255] - new_im = cv2.copyMakeBorder( - img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color - ) - return new_im - - -def smart_resize(img, size): - # Assumes the image has already gone through make_square - if img.shape[0] > size: - img = cv2.resize(img, (size, size), interpolation=cv2.INTER_AREA) - elif img.shape[0] < size: - img = cv2.resize(img, (size, size), interpolation=cv2.INTER_CUBIC) - return img diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/21 Jump Street 720p Mp4 12.md b/spaces/tialenAdioni/chat-gpt-api/logs/21 Jump Street 720p Mp4 12.md deleted file mode 100644 index bfb680e6a43e1d1fbe4d4388aaf88bd4afda814b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/21 Jump Street 720p Mp4 12.md +++ /dev/null @@ -1,14 +0,0 @@ - -

          How to Watch 21 Jump Street in High Quality

          -

          21 Jump Street is a 2012 action comedy film starring Jonah Hill and Channing Tatum as two underachieving cops who go undercover as high school students to bust a drug ring. The film is based on the 1980s television series of the same name that launched Johnny Depp's career. The film was a critical and commercial success, earning positive reviews and over $200 million at the box office. It also spawned a sequel, 22 Jump Street, in 2014.

          -

          21 Jump Street 720p Mp4 12


          Download ✏ ✏ ✏ https://urlcod.com/2uK65B



          -

          If you want to watch 21 Jump Street in high quality, you have several options. You can stream it on Netflix with a subscription[^1^], or you can rent or buy it on various online platforms such as Amazon Prime Video, iTunes, Google Play, YouTube, or Vudu. You can also download it from some websites that offer free or pirated movies, but this is not recommended as it may be illegal, unsafe, or low-quality.

          -

One of the websites that claims to offer 21 Jump Street in high quality is Archive.org[^2^], a non-profit library of millions of free books, movies, music, and more. However, this website does not actually have the movie itself, but only a PDF file that contains some information about the movie and its cast and crew. The file name is "21-jump-street_202101.pdf" and it has a size of 12 MB. This file is not a video file and cannot be played by any media player. It is also not clear why the file name includes "720p" and "Mp4", which refer to video resolution and format and have nothing to do with PDF files.
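One quick way to confirm what a downloaded file really is, regardless of its name, is to inspect its first few bytes. The Python sketch below is only an illustration of that idea; the file name is taken from the example above and the check covers just PDF and MP4 signatures.

```python
# Identify a file by its leading bytes instead of trusting its extension.
# PDF files begin with "%PDF"; MP4 files carry an "ftyp" box at offset 4.

def sniff_file_type(path):
    with open(path, "rb") as f:
        header = f.read(12)
    if header.startswith(b"%PDF"):
        return "PDF document"
    if len(header) >= 8 and header[4:8] == b"ftyp":
        return "MP4 video"
    return "unknown"

# File name from the article's example; adjust to wherever you saved it.
print(sniff_file_type("21-jump-street_202101.pdf"))
```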

          -

          Therefore, if you want to watch 21 Jump Street in high quality, you should avoid downloading this file from Archive.org or any other website that offers it. Instead, you should choose a legal and reliable source that can provide you with a good viewing experience. You can also watch the original TV series on YouTube[^3^], where you can find all five seasons of 21 Jump Street for free.

          - -

21 Jump Street is not only a hilarious and action-packed movie, but also a tribute to the original TV series that inspired it. The film features many references and Easter eggs that fans of the show can appreciate. For example, all original cast members from the 21 Jump Street TV series have cameos in the film, except Richard Grieco and Dustin Nguyen. In one scene, the bad guys watch the original TV series on television, and almost every shot shows Nguyen's character. This is a nod to the fact that Nguyen did not participate in the film.

          -

          -

          Another reference to the TV series is the address of the undercover headquarters, which is 37 Jump Street. This is a joke on the fact that the TV series was filmed at 37 North Union Street in Vancouver, Canada. The film also pays homage to some of the memorable episodes and characters from the show, such as Booker, Hanson, Penhall, Ioki, and Fuller. The film also features a surprise twist at the end that reveals the true identity of one of the main characters, which is a shocker for both new and old fans alike.

          -

          21 Jump Street is a film that can be enjoyed by anyone who loves comedy and action, but especially by those who grew up watching the TV series. The film manages to balance nostalgia and innovation, while delivering a fun and entertaining story. It is a rare example of a successful adaptation that honors its source material while creating something new and original.

          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Boss Baby English 720p Movie Download Utorrent The Best Way to Enjoy the Animated Comedy.md b/spaces/tialenAdioni/chat-gpt-api/logs/Boss Baby English 720p Movie Download Utorrent The Best Way to Enjoy the Animated Comedy.md deleted file mode 100644 index be478bc1d18d86006f5e38f1520cf36b238dd4fd..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Boss Baby English 720p Movie Download Utorrent The Best Way to Enjoy the Animated Comedy.md +++ /dev/null @@ -1,147 +0,0 @@ -
          -

          How to Watch Boss Baby English 720p Movie with Utorrent

          - -

          Boss Baby is a 2017 animated comedy film that follows the adventures of a talking baby who works as a secret agent for a corporation that competes with puppies for the love of humans. The film features the voices of Alec Baldwin, Steve Buscemi, Jimmy Kimmel, Lisa Kudrow and Tobey Maguire. If you are a fan of animation and humor, you might want to watch Boss Baby English 720p Movie with Utorrent, a free and easy way to download movies from the internet.

          -

          Boss Baby English 720p Movie Download Utorrent


          DOWNLOAD ––– https://urlcod.com/2uKaY0



          - -

          In this article, we will show you how to download and watch Boss Baby English 720p Movie with Utorrent, a popular torrent client software that allows you to download files from other users who share them. We will also give you some tips on how to enjoy the movie in high quality and avoid any legal issues.

          - -

          What is Utorrent and How Does it Work?

          - -

Utorrent is a program that enables you to download files from the internet using a peer-to-peer (P2P) network. This means that instead of downloading files from a central server, you download them from other users who have the same files on their computers. This way, you can download files faster and more efficiently, as well as share them with others.

          - -

To download files with Utorrent, you need two things: a torrent file and a torrent client. A torrent file is a small file that contains information about the file you want to download, such as its name, size, location and source. A torrent client is a program that reads the torrent file and connects you to other users who have the file you want. Utorrent is one of the most popular torrent clients available.
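If you are curious what a torrent file actually contains, you can decode it yourself: it is a bencoded dictionary with entries such as the tracker URL and the name and size of the file it describes. The sketch below is a minimal decoder for illustration only (real clients use complete libraries), and the torrent file name is a hypothetical placeholder.

```python
# Minimal bencode reader: a .torrent file is a bencoded dictionary with keys
# such as "announce" (tracker URL) and "info" (file name, length, pieces).

def bdecode(data, i=0):
    c = data[i:i + 1]
    if c == b"i":                       # integer: i<digits>e
        end = data.index(b"e", i)
        return int(data[i + 1:end]), end + 1
    if c == b"l":                       # list: l<items>e
        i, items = i + 1, []
        while data[i:i + 1] != b"e":
            item, i = bdecode(data, i)
            items.append(item)
        return items, i + 1
    if c == b"d":                       # dictionary: d<key><value>...e
        i, result = i + 1, {}
        while data[i:i + 1] != b"e":
            key, i = bdecode(data, i)
            value, i = bdecode(data, i)
            result[key] = value
        return result, i + 1
    colon = data.index(b":", i)         # byte string: <length>:<bytes>
    length = int(data[i:colon])
    start = colon + 1
    return data[start:start + length], start + length

# Hypothetical file name, for illustration only.
with open("example.torrent", "rb") as f:
    meta, _ = bdecode(f.read())

print(meta.get(b"announce"))            # tracker URL
print(meta[b"info"].get(b"name"))       # suggested file name
print(meta[b"info"].get(b"length"))     # size in bytes (single-file torrents)
```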

          - -

          How to Download Boss Baby English 720p Movie with Utorrent

          - -

          Before you start downloading Boss Baby English 720p Movie with Utorrent, you need to make sure that you have the following things:

          - -
            -
          • A computer or laptop with a good internet connection.
          • -
          • Utorrent software, which you can download from here.
          • -
          • A torrent file or magnet link for Boss Baby English 720p Movie, which you can find from various websites such as Internet Archive, KatMovieHD, or Telegastro.net.
          • -
          - -

          Once you have these things ready, follow these steps to download Boss Baby English 720p Movie with Utorrent:

          -


          - -
            -
          1. Open Utorrent and click on File > Add Torrent (or press Ctrl+O).
          2. -
          3. Browse to the location where you saved the torrent file or magnet link for Boss Baby English 720p Movie and select it.
          4. -
          5. Choose a folder where you want to save the downloaded movie file and click OK.
          6. -
          7. Wait for the download to complete. You can see the progress and speed of the download on the Utorrent interface.
          8. -
          9. When the download is finished, you can find the movie file in the folder that you chose in step 3.
          10. -
          - -

          How to Watch Boss Baby English 720p Movie in High Quality

          - -

          Now that you have downloaded Boss Baby English 720p Movie with Utorrent, you can watch it on your computer or laptop. However, if you want to have a better viewing experience, you might want to consider these tips:

          - -
            -
          • Use a media player software that supports high-definition video formats such as VLC Media Player, which you can download from here.
          • -
          • Adjust the brightness, contrast and color settings of your monitor or laptop screen according to your preference and lighting conditions.
          • -
          • Use headphones or speakers to enhance the sound quality and volume of the movie.
          • -
          • Avoid any distractions or interruptions while watching the movie such as phone calls, messages or notifications.
          • -
          - -

          How to Avoid Legal Issues When Downloading Boss Baby English 720p Movie with Utorrent

          - -

          Downloading Boss Baby English 720p Movie with Utorrent might seem like a convenient and easy way to watch the movie, but it also comes with some risks and challenges. One of them is the possibility of facing legal issues for violating copyright laws or terms of service of some websites. To avoid this, you should follow these guidelines:

          - -
            -
          • Only download Boss Baby English 720p Movie with Utorrent from trusted and reputable sources that have permission from the original creators or distributors of the movie.
          • -
          • Do not share or distribute Boss Baby English 720p Movie with Utorrent to other people without their consent or authorization.
          • -
          • Delete Boss Baby English 720p Movie with Utorrent from your computer or laptop after watching it or within a reasonable period of time.
          • -
          • Support the original creators or distributors of Boss Baby English 720p Movie by buying their products or services if you like their work.
          • -
          - -

          -

          What is Boss Baby English 720p Movie About?

          - -

          Boss Baby English 720p Movie is a comedy film that revolves around the life of a seven-year-old boy named Tim Templeton, who has a happy and normal life with his parents. However, everything changes when his parents bring home a new baby brother, who turns out to be a talking, suit-wearing, briefcase-carrying spy for a corporation called Baby Corp. The baby, who calls himself Boss Baby, reveals that he is on a mission to stop the evil CEO of Puppy Co., Francis Francis, from launching a new product that will make people love puppies more than babies. Tim reluctantly agrees to help Boss Baby in exchange for getting his parents' attention back.

          - -

          The film is based on the 2010 picture book of the same name by Marla Frazee, and is directed by Tom McGrath, who also directed Madagascar and Megamind. The film features a star-studded voice cast, including Alec Baldwin as Boss Baby, Steve Buscemi as Francis Francis, Jimmy Kimmel and Lisa Kudrow as Tim's parents, and Tobey Maguire as the narrator and adult Tim. The film also has a catchy soundtrack composed by Hans Zimmer and Steve Mazzaro, featuring songs by The Beatles, Elvis Presley and Kool & The Gang.

          - -

          Boss Baby English 720p Movie is a fun and entertaining film that appeals to both children and adults. It has a lot of humor, action, adventure and heartwarming moments. It also delivers a positive message about family, love and loyalty. If you are looking for a movie that will make you laugh and smile, you should definitely watch Boss Baby English 720p Movie with Utorrent.

          - -

          -

          What are the Benefits of Downloading Boss Baby English 720p Movie with Utorrent?

          - -

          Downloading Boss Baby English 720p Movie with Utorrent has many benefits that make it a worthwhile option for movie lovers. Here are some of them:

          - -
            -
          • It is free and easy. You do not need to pay any fees or subscriptions to download Boss Baby English 720p Movie with Utorrent. You also do not need any technical skills or knowledge to use Utorrent. All you need is a computer, an internet connection and a torrent file or magnet link.
          • -
          • It is fast and efficient. You can download Boss Baby English 720p Movie with Utorrent in a matter of minutes or hours, depending on your internet speed and the availability of other users who have the file. You can also pause and resume the download at any time, without losing any progress.
          • -
          • It is flexible and customizable. You can choose the quality and format of Boss Baby English 720p Movie that you want to download, such as 1080p, 720p or 480p. You can also select the audio and subtitle options that suit your preferences, such as Hindi or English.
          • -
          • It is social and interactive. You can share Boss Baby English 720p Movie with Utorrent with other users who are interested in the movie, as well as comment and rate it. You can also join online communities and forums that discuss and review Boss Baby English 720p Movie and other movies.
          • -
          - -

          What are the Drawbacks of Downloading Boss Baby English 720p Movie with Utorrent?

          - -

          Downloading Boss Baby English 720p Movie with Utorrent also has some drawbacks that you should be aware of before you start the download process. Here are some of them:

          - -
            -
          • It is risky and illegal. Downloading Boss Baby English 720p Movie with Utorrent might expose you to viruses, malware and other harmful software that can damage your computer or steal your personal information. It might also violate the copyright laws or terms of service of some websites that host or distribute the movie. You might face legal consequences such as fines or lawsuits if you are caught downloading Boss Baby English 720p Movie with Utorrent.
          • -
          • It is unreliable and unpredictable. Downloading Boss Baby English 720p Movie with Utorrent depends on the availability and speed of other users who have the file. You might encounter slow or interrupted downloads, incomplete or corrupted files, or fake or misleading files. You might also face difficulties in finding a good torrent file or magnet link for Boss Baby English 720p Movie.
          • -
          • It is unethical and unfair. Downloading Boss Baby English 720p Movie with Utorrent might deprive the original creators or distributors of the movie of their rightful income and recognition. It might also discourage them from making more quality movies in the future. You might miss out on the benefits of supporting the movie industry and enjoying its products legally.
          • -
          - -

          Conclusion

          - -

          Boss Baby English 720p Movie Download Utorrent is a great option for animation lovers who want to watch a funny and entertaining movie. However, it also requires some preparation and caution to ensure a smooth and safe download process. By following our guide and tips, you can watch Boss Baby English 720p Movie in high quality and avoid any legal issues.

          - -

          We hope that this article was helpful and informative for you. If you have any questions or feedback, feel free to leave a comment below. Thank you for reading!

          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Embarcadero RAD Studio 10.3.3 Version 26.0.36039.7899 Enhancements to VCL FireMonkey and InterBase.md b/spaces/tialenAdioni/chat-gpt-api/logs/Embarcadero RAD Studio 10.3.3 Version 26.0.36039.7899 Enhancements to VCL FireMonkey and InterBase.md deleted file mode 100644 index 4ebd9982ae6fc4f7e762a198d2beebd877b94697..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Embarcadero RAD Studio 10.3.3 Version 26.0.36039.7899 Enhancements to VCL FireMonkey and InterBase.md +++ /dev/null @@ -1,216 +0,0 @@ -
          -

          Embarcadero RAD Studio 10.3.3: A Powerful IDE for Cross-Platform Development

          -

          If you are looking for a fast and easy way to create native applications for Windows, macOS, iOS, Android, and Linux, you should check out Embarcadero RAD Studio 10.3.3. This integrated development environment (IDE) allows you to use the same code base to build stunning, high-performing apps for multiple platforms.

          -

          In this article, we will give you an overview of what Embarcadero RAD Studio 10.3.3 is, what's new in this release, and how to get started with it.

          -

          Embarcadero RAD Studio 10.3.3 Version 26.0.36039.7899


          DOWNLOAD ☆☆☆☆☆ https://urlcod.com/2uK5MI



          -

          What is Embarcadero RAD Studio 10.3.3?

          -

          Embarcadero RAD Studio 10.3.3 is the latest version of the award-winning IDE that empowers developers to create cross-platform applications using two powerful programming languages: Delphi and C++Builder.

          -

          Delphi is a modern object-oriented Pascal language that offers a rich set of components, libraries, and frameworks for building GUI, database, web, mobile, cloud, and IoT applications.

          -

          C++Builder is a C++ IDE that leverages the same components, libraries, and frameworks as Delphi, but also supports the latest C++ standards and features.

          -

          With Embarcadero RAD Studio 10.3.3, you can:

          -
            -
          • Write once and compile natively for Windows, macOS, iOS, Android, and Linux
          • -
          • Use a single UI framework (FireMonkey) to design responsive and beautiful user interfaces
          • -
          • Access over 70 data sources with Enterprise Connectors
          • -
          • Build scalable and secure multi-tier applications with RAD Server
          • -
          • Integrate with cloud services and APIs using REST Client Library
          • -
          • Leverage the power of parallel programming and asynchronous calls
          • -
          • Debug and profile your code with advanced tools
          • -
          • Test your applications on real devices or simulators/emulators
          • -
          • Distribute your applications through app stores or web installers
          • -
          -

          What's New in Embarcadero RAD Studio 10.3.3?

          -

          Embarcadero RAD Studio 10.3.3 builds on the feature set of previous releases by adding new capabilities throughout the product designed to benefit Delphi and C++ developers.

          -

          Android 64-bit support for Delphi

          -

          New in 10.3.3 is Android 64-bit application support for Delphi. This means you can now build Google Play Store ready Android 64-bit applications, complete with Android App Bundle support.

          -

          This is important because Google requires all new apps and app updates to support 64-bit architectures since August 2019.

          -

          How to build Android 64-bit applications with Delphi

          -

          To build Android 64-bit applications with Delphi, you need to:

          -


          -
            -
          1. Install the Android SDK 26.0.2 or later
          2. -
          3. Select the Android 64-bit platform in the Project Manager
          4. -
          5. Add the required libraries to the SDK Manager
          6. -
          7. Configure the deployment options for Android App Bundle or APK
          8. -
          9. Compile and deploy your application to a device or emulator
          10. -
          -

          You can find more details on how to do this in this documentation page.

          -

          InterBase support for Delphi Android 64-bit

          -

          If you want to use a fast and reliable embedded database for your Android applications, you can now take advantage of InterBase support for Delphi Android 64-bit.

          -

          InterBase is a cross-platform database that offers encryption, change tracking, disaster recovery, synchronization, and more.

          -

          You can use InterBase in two modes: IBLite or IBToGo.

          -

          IBLite is a free edition of InterBase that allows you to embed a database of up to 100 MB in size into your application.

          -

          IBToGo is a commercial edition of InterBase that allows you to embed a full-featured database of unlimited size into your application.

          -

          You can find more information on how to use InterBase with Delphi Android 64-bit in this documentation page.

          -

          iOS 13 and macOS Catalina support

          -

          New in 10.3.3 is support for targeting iOS 13 App Store (for Delphi and C++Builder) and macOS Catalina (Delphi only).

          -

          This means you can now build App Store ready iOS 13 mobile applications, complete with Dark Theme support.

          -

          You can also target macOS Catalina with 64-bit macOS Delphi apps, complete with notarization support.

          -

          How to target iOS 13 and macOS Catalina with Delphi and C++Builder

          -

          To target iOS 13 App Store with Delphi or C++Builder, you need to:

          -
            -
          1. Install Xcode 11 or later on your Mac
          2. -
          3. Select the iOS Device - 64 bit platform in the Project Manager
          4. -
          5. Add the required frameworks to the SDK Manager
          6. -
          7. Configure the provisioning profile and certificate for your application
          8. -
          9. Compile and deploy your application to a device or simulator
          10. -
          -

          To target macOS Catalina with Delphi, you need to:

          -
            -
          1. Select the macOS platform in the Project Manager
          2. -
          3. Add the required libraries to the SDK Manager
          4. -
          5. Configure the entitlements file and code signing options for your application
          6. -
          7. Compile your application and copy it to your Mac
          8. -
          9. Notarize your application using the Deployment Manager or manually
          10. -
          11. Distribute your application through a web installer or other methods
          12. -
          -

          You can find more details on how to do this in this documentation page.

          -

          Dark Theme support for iOS 13

          -

          If you want your iOS applications to look great in both light and dark modes, you can now use the Dark Theme support for iOS 13.

          -

          This feature allows you to automatically adapt your user interface elements to the system appearance settings.

          -

          You can also customize the appearance of specific controls or forms using style hooks or custom styles.

          -

          You can find more information on how to use Dark Theme support for iOS 13 in this documentation page.

          -

          RAD Server Docker deployment

          -

          New in 10.3.3 is RAD Server Docker deployment and configuration support through built-in scripts.

          -

          This feature allows you to easily deploy your RAD Server multi-tier applications on Linux using Docker containers.

          -

          Docker is a software platform that enables you to build, run, and share applications using containers.

          -

          Containers are isolated environments that contain everything I'll try to continue the article. Here is the rest of the article with HTML formatting:

          you need to run your applications, such as code, libraries, configuration files, and dependencies.

          -

          Using Docker, you can simplify the deployment and management of your RAD Server applications, as well as scale them up or down as needed.
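Once the container is running, one practical way to confirm the service came up is to poll its published HTTP port from the host. The Python sketch below is illustrative only: the host port and URL path are assumptions, not values documented here, so adjust them to match how you started the container.

```python
import time
import urllib.error
import urllib.request

BASE_URL = "http://localhost:8080"   # hypothetical port published with `docker run -p`
PATH = "/version"                    # hypothetical status endpoint

def wait_until_up(timeout_seconds=60):
    # Poll the endpoint until it answers or the timeout expires.
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(BASE_URL + PATH, timeout=5) as resp:
                print("Server answered with HTTP", resp.status)
                return True
        except (urllib.error.URLError, OSError):
            time.sleep(2)            # the container may still be starting
    return False

if __name__ == "__main__":
    print("reachable" if wait_until_up() else "not reachable yet")
```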

          -

          How to use RAD Server Docker image for Linux

          -

          To use RAD Server Docker image for Linux, you need to:

          -
            -
          1. Install Docker on your Linux machine
          2. -
          3. Pull the pa-radserver or pa-radserver-ib image from Docker Hub
          4. -
          5. Create and run a container based on the image
          6. -
          7. Deploy your custom RAD Server resource modules to the container
          8. -
          9. Test and access your RAD Server endpoints
          10. -
          -

          You can find more details on how to do this in this documentation page.

          -

          Benefits of RAD Server Docker deployment

          -

          Some of the benefits of using RAD Server Docker deployment are:

          -
            -
          • You can easily create and destroy RAD Server instances as needed
          • -
          • You can isolate your RAD Server applications from other processes and resources
          • -
          • You can automate the configuration and deployment of your RAD Server applications
          • -
          • You can improve the portability and compatibility of your RAD Server applications
          • -
          • You can leverage the existing Docker tools and ecosystem
          • -
          -

          How to Get Started with Embarcadero RAD Studio 10.3.3?

          -

          If you are interested in trying out Embarcadero RAD Studio 10.3.3, you can download and install it for free for 30 days.

          -

          Download and install Embarcadero RAD Studio 10.3.3

          -

          To download and install Embarcadero RAD Studio 10.3.3, you need to:

          -
            -
          1. Go to this page and fill out the form to request a trial license
          2. -
          3. Check your email for the download link and activation code
          4. -
          5. Download the installer and run it on your Windows machine
          6. -
          7. Follow the installation steps and enter your activation code when prompted
          8. -
          9. Enjoy your free trial of Embarcadero RAD Studio 10.3.3
          10. -
          -

          System requirements and installation steps

          -

          To install Embarcadero RAD Studio 10.3.3, you need a Windows machine with at least:

          -
            -
          • Windows 7 SP1 or later (64-bit)
          • -
          • 1.6 GHz or faster processor
          • -
          • 2 GB of RAM (4 GB recommended)
          • -
          • Between 6 GB and 60 GB of available disk space depending on edition and configuration
          • -
          • DVD-ROM drive (if installing from a Media Kit DVD)
          • -
          • Basic GPU (any vendor)
          • -
          • 1024x768 or higher-resolution monitor
          • -
• Mouse or compatible input device

            Maus oder kompatibles Eingabegerät

            -

            You can find more details on the system requirements and installation steps in this documentation page.

            -

            Update subscription benefits and options

            -

            If you already have an active update subscription for Embarcadero RAD Studio, you can download and install Embarcadero RAD Studio 10.3.3 for free.

            -

            If you don't have an active update subscription, you can purchase one and enjoy the following benefits:

            -
              -
            • Access to the latest product updates and features
            • -
            • Priority technical support
            • -
            • Exclusive bonus content and add-ons
            • -
            • Early access to beta versions and previews
            • -
            • Discounts on additional licenses and renewals
            • -
            -

            You can find more information on the update subscription benefits and options in this page.

            -

            Explore the product features and capabilities

            -

            Once you have installed Embarcadero RAD Studio 10.3.3, you can start exploring its features and capabilities by using the sample projects and tutorials provided.

            -

            Sample projects and tutorials

            -

            Embarcadero RAD Studio 10.3.3 comes with a rich collection of sample projects and tutorials that demonstrate how to use various aspects of the product.

            -

            You can find the sample projects in the following folder:

            -

            C:\Users\Public\Documents\Embarcadero\Studio\20.0\Samples

            -

            You can also access them from the Welcome Page or the Projects Window in the IDE.

            -

            The sample projects are organized by language (C++ or Delphi), platform (Windows, macOS, iOS, Android, or Linux), and topic (FireMonkey, VCL, Database, Web, etc.).

            -

            You can also find more sample projects on GitHub at https://github.com/Embarcadero.

            -

            You can find the tutorials in the documentation wiki at https://docwiki.embarcadero.com/RADStudio/Rio/en/Tutorials.

            -

            The tutorials cover various topics such as creating your first application, working with components, using databases, developing web services, and more.

            -

            Documentation and resources

            -

            If you need more help or information about Embarcadero RAD Studio 10.3.3, you can consult the following sources:

            - -

            Conclusion

            -

            In this article, we have introduced you to Embarcadero RAD Studio 10.3.3, a powerful IDE for cross-platform development using Delphi and C++Builder.

            -

            We have shown you what Embarcadero RAD Studio 10.3.3 is, what's new in this release, and how to get started with it.

            -

            We hope you have enjoyed this article and found it useful. If you want to learn more about Embarcadero RAD Studio 10.3.3, we encourage you to download it for free for 30 days and try it out yourself.

            -

            Thank you for reading!

            -

            Frequently Asked Questions

            -
              -
            1. What is Embarcadero RAD Studio?
            2. -
            3. Embarcadero RAD Studio is an integrated development environment (IDE) that allows you to create cross-platform applications using Delphi and C++Builder.

            4. -
            5. What are the main features of Embarcadero RAD Studio 10.3.3?
            6. -
            7. Some of the main features of Embarcadero RAD Studio 10.3.3 are:

              -
                -
              • Android 64-bit support for Delphi
              • -
              • iOS 13 and macOS Catalina support for Delphi and C++Builder
              • -
              • RAD Server Docker deployment for Linux
              • -
              • Enterprise Connectors subscription license for Enterprise and Architect edition users
              • -
              • C++17 support for Windows 64-bit for C++Builder
              • -
              • Firebase push notification support for Android for Delphi and C++Builder
              • -
              • Dark Theme support for iOS 13 for Delphi and C++Builder
              • -
              • And many more...
              • -
            8. -
            9. How can I download and install Embarcadero RAD Studio 10.3.3?
            10. -
            11. You can download and install Embarcadero RAD Studio 10.3.3 for free for 30 days by going to this page and filling out the form to request a trial license.

            12. -
            13. How can I deploy my applications with Embarcadero RAD Studio 10.3.3?
            14. -
            15. You can deploy your applications with Embarcadero RAD Studio 10.3.3 through app stores or web installers for Windows, macOS, iOS, Android, or Linux platforms.

            16. -
            17. How can I learn more about Embarcadero RAD Studio 10.3.3?
            18. -
            19. You can learn more about Embarcadero RAD Studio 10.3.3 by using the sample projects and tutorials provided with the product, consulting the documentation wiki and the product support page, taking online courses at the Embarcadero Academy, or joining the Embarcadero Community.

            20. -
            -

            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Ghatothkach Download Movie Torrent Enjoy the Spectacular Visuals and Soundtrack of the Film.md b/spaces/tialenAdioni/chat-gpt-api/logs/Ghatothkach Download Movie Torrent Enjoy the Spectacular Visuals and Soundtrack of the Film.md deleted file mode 100644 index 08ccdca7ee61cf4cc66d191532f2a9d367667f22..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Ghatothkach Download Movie Torrent Enjoy the Spectacular Visuals and Soundtrack of the Film.md +++ /dev/null @@ -1,111 +0,0 @@ - -

            Ghatothkach Download Movie Torrent: A Guide for Fans of Indian Mythology

            - -

            If you are a fan of Indian mythology, you might have heard of Ghatothkach, the son of Bhima and Hidimba from the epic Mahabharata. He was a mighty warrior who possessed magical powers and could change his size and shape at will. He also had a loyal elephant friend named Gajju who accompanied him on his adventures.

            -

            Ghatothkach Download Movie Torrent


            Download File https://urlcod.com/2uK34I



            - -

            Ghatothkach is the protagonist of a 2008 animated film that depicts his life story from his birth to his death in the Kurukshetra war. The film features the voices of popular singers like Daler Mehndi, Shreya Ghoshal, Sunidhi Chauhan, and others. The film also has a sequel called Ghatothkach 2 that was released in 2013.

            - -

            If you want to watch this film, you might be wondering how to download Ghatothkach movie torrent for free. In this article, we will show you how to do that safely and legally.

            - -

            What is a Torrent?

            - -

            A torrent is a file that contains information about other files that are shared over a peer-to-peer network. This means that instead of downloading a file from a single source, you download it from multiple sources that have the same file. This makes the download faster and more efficient.

            - -

            However, downloading torrents also comes with some risks. You might encounter malware, viruses, or legal issues if you download copyrighted content without permission. Therefore, you need to be careful and use some precautions when downloading torrents.

            - -

            How to Download Ghatothkach Movie Torrent Safely and Legally?

            - -

            To download Ghatothkach movie torrent safely and legally, you need to follow these steps:

            - -
              -
            1. Find a reliable torrent site that offers Ghatothkach movie torrent. Some of the best torrent sites for movies are 1337x, YTS, and TechNadu. You can use these sites to search for Ghatothkach movie torrent and see the ratings, reviews, and comments of other users.
            2. -
            3. Use a VPN (Virtual Private Network) to hide your IP address and encrypt your online traffic. This will prevent your Internet provider from tracking your online activity and blocking your access to torrent sites. It will also protect you from hackers, malware, and legal issues. Some of the best VPNs for torrenting are ExpressVPN, NordVPN, and Surfshark.
            4. -
            5. Download a torrent client that can handle torrent files and magnet links. A torrent client is a software that allows you to download torrents from other peers. Some of the best torrent clients are uTorrent, BitTorrent, and qBittorrent.
            6. -
            7. Open the torrent client and click on the Ghatothkach movie torrent link or magnet link from the torrent site. This will start the download process and show you the progress, speed, and peers.
            8. -
            9. Wait for the download to finish and enjoy watching Ghatothkach movie on your device.
            10. -
            - -

            That's it! You have successfully downloaded Ghatothkach movie torrent for free. We hope you enjoy this animated film that showcases the legend of Ghatothkach and his elephant friend Gajju.

            -

            What is Ghatothkach Movie About?

            - -

            Ghatothkach movie is an animated film that tells the story of Ghatothkach, the son of Bhima and Hidimba from the Indian epic Mahabharata. He was born with a blessing from Lord Shiva that gave him extraordinary powers and abilities. He could fly, change his size and shape, and create illusions with his magic.

            - -

            The film follows his adventures in the jungle with his elephant friend Gajju, his love story with Surekha, the princess of Kashi, and his role in the Kurukshetra war between the Pandavas and the Kauravas. The film also showcases the culture and traditions of ancient India, such as the swayamvara ceremony, the rakshasa rituals, and the war strategies.

            - -

            The film is a blend of comedy, romance, action, and drama that appeals to both children and adults. The film also features songs by famous singers like Daler Mehndi, Shreya Ghoshal, Sunidhi Chauhan, and others. The film has a sequel called Ghatothkach 2 that was released in 2013.

            -

            Ghatothkach full movie download torrent
            -How to download Ghatothkach movie for free
            -Ghatothkach movie torrent link
            -Ghatothkach 2008 movie download
            -Download Ghatothkach animated movie in Hindi
            -Ghatothkach movie free download HD
            -Ghatothkach movie online watch
            -Ghatothkach master of magic movie download
            -Ghatothkach movie songs download
            -Ghatothkach movie review and ratings
            -Ghatothkach movie cast and crew
            -Ghatothkach movie trailer download
            -Ghatothkach movie subtitles download
            -Ghatothkach movie download in Tamil
            -Ghatothkach movie download in Telugu
            -Ghatothkach movie download in Malayalam
            -Ghatothkach movie download in Kannada
            -Ghatothkach movie download in Bengali
            -Ghatothkach movie download in Marathi
            -Ghatothkach movie download in Gujarati
            -Ghatothkach movie download in Punjabi
            -Ghatothkach movie download in Urdu
            -Ghatothkach movie download in English
            -Ghatothkach movie download 480p
            -Ghatothkach movie download 720p
            -Ghatothkach movie download 1080p
            -Ghatothkach movie download 4K
            -Ghatothkach movie download BluRay
            -Ghatothkach movie download DVDrip
            -Ghatothkach movie download MP4
            -Ghatothkach movie download MKV
            -Ghatothkach movie download AVI
            -Ghatothkach movie download MOV
            -Ghatothkach movie download FLV
            -Ghatothkach movie download WMV
            -Ghatothkach movie download M4V
            -Ghatothkach movie download WEBM
            -Ghatothkach movie torrent magnet link
            -Best sites to download Ghatothkach movie torrent
            -How to watch Ghatothkach movie without downloading
            -How to stream Ghatothkach movie online free
            -How to use VPN to download Ghatothkach movie torrent safely
            -How to avoid malware and viruses when downloading Ghatothkach movie torrent
            -How to seed and leech Ghatothkach movie torrent
            -How to use torrent clients to download Ghatothkach movie
            -How to find high-quality and fast downloads of Ghatothkach movie torrent
            -How to join private trackers for downloading Ghatothkach movie torrent
            -How to request re-seed of dead torrents of Ghatothkach movie
            -How to verify the authenticity and integrity of downloaded files of Ghatothkach movie
            -How to extract and play downloaded files of Ghatothkach movie

            - -

            What are the Reviews of Ghatothkach Movie?

            - -

            Ghatothkach movie received mixed reviews from critics and audiences. Some praised the film for its animation quality, voice acting, music, and story. They appreciated the film for bringing a lesser-known hero from the Mahabharata to life and for showcasing the rich heritage of India.

            - -

            However, some criticized the film for its lack of originality, coherence, and depth. They felt that the film was too long, too childish, and too predictable. They also pointed out some factual errors and inconsistencies in the film.

            - -

            The film has a rating of 7.3 out of 10 on IMDb and 2.5 out of 5 on Times of India. The film also won several awards and nominations at various national and international festivals.

            -

            Where to Watch Ghatothkach Movie Trailer?

            - -

            If you want to get a glimpse of Ghatothkach movie before downloading it, you can watch the trailer online. The trailer gives you an overview of the plot, the characters, the animation, and the music of the film. You can also see some of the magical scenes and action sequences that make the film entertaining.

            - -

            You can watch Ghatothkach movie trailer on YouTube, where it has been uploaded by various channels. You can also find the trailer on IMDb, where you can also read more information about the film, such as the cast, crew, trivia, and reviews. You can also watch the trailer on other websites that offer movie trailers and clips.

            - -

            What are the Alternatives to Ghatothkach Movie?

            - -

            If you are looking for more movies that are based on Indian mythology and feature animated characters, you have plenty of options to choose from. Here are some of the alternatives to Ghatothkach movie that you might enjoy:

            - -
              -
            • Return of Hanuman: This is a sequel to Hanuman, a 2005 animated film that narrates the story of Hanuman, the monkey god who helps Lord Rama in his battle against Ravana. The sequel shows Hanuman reincarnated as a young boy who helps his village from evil forces.
            • -
            • Dashavatar: This is a 2008 animated film that depicts the ten incarnations of Lord Vishnu, who takes different forms to save the world from evil in different eras. The film features stories of Matsya, Kurma, Varaha, Narasimha, Vamana, Parashurama, Rama, Krishna, Buddha, and Kalki.
            • -
            • Ramayana: The Epic: This is a 2010 animated film that retells the story of Ramayana, one of the most revered epics in Hinduism. The film follows the journey of Lord Rama, who goes into exile with his wife Sita and brother Lakshmana, and fights against Ravana, who abducts Sita.
            • -

            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Grass Valley Canopus Edius 652 X64 X86torrent 5 Learn the Secrets of Professional Video Editors.md b/spaces/tialenAdioni/chat-gpt-api/logs/Grass Valley Canopus Edius 652 X64 X86torrent 5 Learn the Secrets of Professional Video Editors.md deleted file mode 100644 index 92d3e62fd0fe23e66e57a9130283c7b0024cb53c..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Grass Valley Canopus Edius 652 X64 X86torrent 5 Learn the Secrets of Professional Video Editors.md +++ /dev/null @@ -1,136 +0,0 @@ -
            -

            M3 Data Recovery Keygen Downloadl: A Risky Way to Recover Lost Data

            -

            If you have lost your data due to accidental deletion, formatting, corruption, or other reasons, you may be looking for a way to recover it. One software tool that claims to help with this task is M3 Data Recovery, which can recover deleted or lost data from hard disks and external drives in a few simple steps. It can also repair a corrupted hard drive, recover data from a RAW, inaccessible, or damaged drive, and recover data from a BitLocker-encrypted volume.

            -

            M3 Data Recovery Keygen Downloadl


            Download 🆓 https://urlcod.com/2uKbc9



            -

            However, M3 Data Recovery is not a free software. You need to purchase a license key to activate the full version and continue recovering more files. The free edition only allows you to recover 1GB data. That's why some people are searching for M3 Data Recovery keygen downloadl, hoping to get a free activation code or serial number.

            -

            Why You Should Avoid M3 Data Recovery Keygen Downloadl

            -

            Using M3 Data Recovery keygen downloadl may seem like a good idea to save money, but it actually comes with many risks and disadvantages. Here are some of the possible harms of using M3 Data Recovery keygen downloadl:

            -
              -
            • Endless crashes during the recovery process. M3 Data Recovery keygen downloadl may not work properly and cause your system to crash frequently. This may damage your device and make your data unrecoverable.
            • -
            • Software virus. M3 Data Recovery keygen downloadl may contain virus or malware that will be invisibly installed on your devices. The virus may steal your personal information, damage your files, or even lock your device and ask for ransom.
            • -
            • No tech support, no updates, and data breach. M3 Data Recovery keygen downloadl does not provide any technical support or updates. You may encounter some errors or bugs that cannot be fixed. Moreover, using M3 Data Recovery keygen downloadl may violate the software license agreement and expose your data to unauthorized access.
            • -
            -

            The Best Alternative to M3 Data Recovery Keygen Downloadl

            -

            To avoid the potential risks of using M3 Data Recovery keygen downloadl, we recommend you to try a professional alternative - EaseUS Data Recovery Wizard. This is a powerful and reliable data recovery software that enjoys a high reputation in the data recovery industry. It can help you recover all kinds of files from various devices and scenarios with ease.

            -

            EaseUS Data Recovery Wizard has many advantages over M3 Data Recovery keygen downloadl:

            -

            M3 Data Recovery Crack Free Download
            -M3 Data Recovery License Key Generator
            -M3 Data Recovery Serial Key Download
            -M3 Data Recovery Activation Code Download
            -M3 Data Recovery Patch Download
            -M3 Data Recovery Full Version Download
            -M3 Data Recovery Pro Keygen Download
            -M3 Data Recovery Professional Crack Download
            -M3 Data Recovery Wizard Keygen Download
            -M3 Data Recovery Software Crack Download
            -M3 Data Recovery Tool Keygen Download
            -M3 Data Recovery Mac Crack Download
            -M3 Data Recovery Windows Crack Download
            -M3 Data Recovery Linux Crack Download
            -M3 Data Recovery Android Crack Download
            -M3 Data Recovery iOS Crack Download
            -M3 Data Recovery Portable Keygen Download
            -M3 Data Recovery Offline Keygen Download
            -M3 Data Recovery Online Keygen Download
            -M3 Data Recovery Torrent Keygen Download
            -M3 Data Recovery Mega Keygen Download
            -M3 Data Recovery Mediafire Keygen Download
            -M3 Data Recovery Zippyshare Keygen Download
            -M3 Data Recovery Rapidgator Keygen Download
            -M3 Data Recovery Uploaded Keygen Download
            -M3 Data Recovery 4shared Keygen Download
            -M3 Data Recovery Google Drive Keygen Download
            -M3 Data Recovery Dropbox Keygen Download
            -M3 Data Recovery OneDrive Keygen Download
            -M3 Data Recovery iCloud Keygen Download
            -M3 Data Recovery Latest Version Keygen Download
            -M3 Data Recovery Updated Version Keygen Download
            -M3 Data Recovery Premium Version Keygen Download
            -M3 Data Recovery Ultimate Version Keygen Download
            -M3 Data Recovery Enterprise Version Keygen Download
            -M3 Data Recovery Technician Version Keygen Download
            -M3 Data Recovery Home Version Keygen Download
            -M3 Data Recovery Business Version Keygen Download
            -M3 Data Recovery Standard Version Keygen Download
            -M3 Data Recovery Basic Version Keygen Download
            -How to download and install M3 Data Recovery keygen?
            -How to use M3 Data Recovery keygen to activate the software?
            -How to fix errors and issues with M3 Data Recovery keygen?
            -How to uninstall and remove M3 Data Recovery keygen?
            -How to update and upgrade M3 Data Recovery keygen?
            -Is it safe and legal to use M3 Data Recovery keygen?
            -What are the advantages and disadvantages of using M3 Data Recovery keygen?
            -What are the alternatives and competitors of using M3 Data Recovery keygen?
            -What are the features and benefits of using M3 Data Recovery keygen?
            -What are the reviews and ratings of using M3 Data Recovery keygen?

            -
              -
            • Safe and stable. EaseUS Data Recovery Wizard is 100% clean and secure. It does not contain any virus or malware. It also works smoothly and stably without crashing or freezing.
            • -
            • Easy and fast. EaseUS Data Recovery Wizard has a user-friendly interface and a simple wizard. You can recover your data in just three steps: select a location, scan for files, and preview and recover.
            • -
            • Comprehensive and versatile. EaseUS Data Recovery Wizard can recover more than 1000 types of files from various devices, such as PC, laptop, hard drive, SSD, USB flash drive, memory card, digital camera, etc. It can also handle different data loss situations, such as deletion, formatting, partition loss, virus attack, system crash, etc.
            • -
            • Tech support and updates. EaseUS Data Recovery Wizard provides free lifetime technical support and updates. You can contact the support team anytime if you have any questions or problems. You can also enjoy the latest features and improvements of the software.
            • -
            -

            How to Recover Your Data with EaseUS Data Recovery Wizard

            -

            If you want to try EaseUS Data Recovery Wizard instead of M3 Data Recovery keygen downloadl, you can follow these simple steps:

            -
              -
            1. Download and install EaseUS Data Recovery Wizard on your computer.
            2. -
            3. Launch the software and select the location where you lost your data.
            4. -
            5. Click "Scan" to start scanning for lost files.
            6. -
            7. Preview and select the files you want to recover.
            8. -
            9. Click "Recover" to save them to another safe location.
            10. -
            -

            Note: Do not save the recovered files to the same location where you lost them to avoid overwriting.

            -

            Conclusion

            -

            M3 Data Recovery keygen downloadl is not a wise choice to recover your lost data. It may bring more troubles and risks than benefits. Instead of using M3 Data Recovery keygen downloadl, we suggest you to use a professional alternative - EaseUS Data Recovery Wizard. This software can help you recover your data safely and easily without any hassle. You can download it now and give it a try!

            -

            How to Get M3 Data Recovery License Key Legally

            -

            If you want to use M3 Data Recovery without any risks or limitations, you need to get a legal license key from the official website. The license key will activate the full version of M3 Data Recovery and allow you to recover unlimited data from various devices and scenarios.

            -

            There are different types of license keys for M3 Data Recovery, such as Standard, Professional, Technician, and Enterprise. You can choose the one that suits your needs and budget. The price of M3 Data Recovery license key ranges from $69.95 to $499.95.

            -

            To get M3 Data Recovery license key legally, you can follow these steps:

            -
              -
            1. Visit the official website of M3 Data Recovery and select the product you want to buy.
            2. -
            3. Click "Buy Now" and choose the license type and quantity.
            4. -
            5. Fill in your billing information and payment method.
            6. -
            7. Confirm your order and complete the payment.
            8. -
            9. Check your email and find the license key sent by M3 Data Recovery.
            10. -
            11. Download and install M3 Data Recovery on your computer.
            12. -
            13. Launch the software and enter the license key to activate the full version.
            14. -
            -

            Note: If you do not receive the license key within 24 hours, please contact M3 Data Recovery support team for help.

            -


            -


            -

            How to Use M3 Data Recovery Safely and Effectively

            -

            If you have decided to use M3 Data Recovery legally with a valid license key, you may wonder how to use it safely and effectively. Here are some tips and tricks that can help you recover your data with M3 Data Recovery:

            -
              -
            • Backup your data regularly. The best way to prevent data loss is to backup your data regularly. You can use an external hard drive, a cloud service, or a backup software to create backups of your important files. This way, you can restore your data easily in case of any disaster.
            • -
            • Stop using your device immediately after data loss. If you have lost your data due to deletion, formatting, corruption, or other reasons, you should stop using your device immediately. Do not save any new files or perform any operations on your device. This can avoid overwriting your lost data and increase the chance of recovery.
            • -
            • Select the right recovery mode. M3 Data Recovery has different recovery modes for different situations. You should select the one that matches your data loss scenario. For example, if you want to recover deleted files, you can choose "Data Recovery" mode. If you want to recover data from a RAW or corrupted drive, you can choose "RAW Drive Recovery" mode. If you want to recover data from a Bitlocker encrypted drive, you can choose "Bitlocker Recovery" mode.
            • -
            • Scan the correct location. M3 Data Recovery allows you to scan a specific location where you lost your data. You should select the correct location to save time and improve accuracy. For example, if you lost your data from a USB flash drive, you should select the USB flash drive as the scan location.
            • -
            • Preview and filter the scan results. After scanning, M3 Data Recovery will display all the recoverable files in a list. You can preview and filter the scan results to find your desired files quickly. You can use the "Preview" function to check the file quality and content. You can also use the "Filter" function to sort the files by type, size, date, etc.
            • -
            • Recover and save your data to another location. Once you have found your lost files, you can select them and click "Recover" to save them to another safe location. Do not save the recovered files to the same location where you lost them to avoid overwriting.
            • -
            -


            -


            -

            FAQs about M3 Data Recovery Keygen Downloadl

            -

            In this section, we will answer some frequently asked questions about M3 Data Recovery keygen downloadl. If you have any other questions, please feel free to contact us or leave a comment below.

            -

            Q: Is M3 Data Recovery keygen downloadl safe?

            -

            A: No, M3 Data Recovery keygen downloadl is not safe. It may contain virus or malware that can harm your device and data. It may also cause endless crashes, data breach, or legal issues. You should avoid using M3 Data Recovery keygen downloadl and use a legal license key instead.

            -

            Q: How to get M3 Data Recovery license key for free?

            -

            A: There is no official way to get M3 Data Recovery license key for free. However, you can try some methods to get a discount or a free trial. For example, you can:

            -
              -
            • Subscribe to M3 Data Recovery newsletter and get a 20% off coupon code.
            • -
            • Share M3 Data Recovery on social media and get a 10% off coupon code.
            • -
            • Download M3 Data Recovery Free edition and recover up to 1GB data for free.
            • -
            • Participate in M3 Data Recovery giveaway or promotion events and win a free license key.
            • -
            -

            Q: How to use M3 Data Recovery license key?

            -

            A: To use M3 Data Recovery license key, you need to:

            -
              -
            1. Buy a license key from the official website of M3 Data Recovery.
            2. -
            3. Download and install M3 Data Recovery on your computer.
            4. -
            5. Launch the software and enter the license key to activate the full version.
            6. -
            -

            Conclusion

            -

            M3 Data Recovery keygen downloadl is a risky way to recover lost data. It may cause more problems and damages than benefits. Instead of using M3 Data Recovery keygen downloadl, we recommend you to use a professional alternative - EaseUS Data Recovery Wizard. This software can help you recover your data safely and easily without any hassle. You can also get a legal license key for M3 Data Recovery from the official website if you want to use it without any limitations. We hope this article can help you make a wise decision and recover your data successfully.

            -


            -


            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/AetherSX2 APK - Download and Play the Most Awesome Game on Android.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/AetherSX2 APK - Download and Play the Most Awesome Game on Android.md deleted file mode 100644 index dc43111475020bdc6607b5957aa46ac5dbcdb93d..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/AetherSX2 APK - Download and Play the Most Awesome Game on Android.md +++ /dev/null @@ -1,194 +0,0 @@ -
            -
            | Outline heading | Subtopics |
            | --- | --- |
            | (truncated) | BIOS file and game ISOs |
            | H2: How to download AetherSX2 APK from the official website? | Step-by-step instructions with screenshots; how to enable unknown sources and install the APK file |
            | H2: How to download AetherSX2 APK from the Google Play Store? | Step-by-step instructions with screenshots; how to update the app and manage permissions |
            | H2: How to configure AetherSX2 settings and play games? | How to choose the graphics renderer and resolution; how to load the BIOS file and game ISOs; how to use touchscreen controls or Bluetooth controllers |
            | H3: Tips and tricks for AetherSX2 | How to improve performance and compatibility; how to save and load states; how to use widescreen patches and upscaling |
            | H1: Conclusion | Summary of the main points and benefits of AetherSX2 |
            | H4: FAQs | What is the difference between AetherSX2 and other PS2 emulators for Android? Is AetherSX2 legal and safe to use? Where can I get BIOS files and game ISOs for AetherSX2? How can I report bugs or request features for AetherSX2? Can I play multiplayer games with AetherSX2? |

            Table 2: Article with HTML formatting

            How to Download AetherSX2 APK: A PS2 Emulator for Android

            -

            If you are a fan of PlayStation 2 games and want to enjoy them on your Android device, you might be interested in downloading AetherSX2 APK. AetherSX2 is a PS2 emulator for Android that lets you play a wide range of PS2 titles on your smartphone or tablet. It is based on the popular PCSX2 emulator for PC, which means it has a high compatibility rate and many features. In this article, we will show you how to download AetherSX2 APK from the official website or the Google Play Store, how to install it on your device, how to configure it, and how to play games with it. We will also share some tips and tricks to make your gaming experience even better.

            -

            What are the requirements for AetherSX2?

            -

            Before you download AetherSX2 APK, you need to make sure that your device meets the minimum requirements for running the emulator. According to the developer, you need a device with at least four large CPU cores (Cortex-A75 or higher), an Adreno GPU (preferably Snapdragon 845 or higher), and 4GB of RAM or more. You also need Android 7.0 or higher as your operating system.

            -

            download aethersx2 apk


            DOWNLOAD ····· https://bltlly.com/2uOmzq



            -

            In addition to your device specifications, you also need a BIOS file and game ISOs to play PS2 games with AetherSX2. The BIOS file is a software that runs the PS2 hardware, and it is required for the emulator to work. You can dump your own BIOS file from your PS2 console using a USB flash drive or a memory card adapter, or you can search online for one (but be careful of legal issues). The game ISOs are digital copies of your PS2 discs, which you can also dump yourself using a PC or a modded PS2, or you can download them from various websites (again, be mindful of legal issues).

            -

            How to download AetherSX2 APK from the official website?

            -

            If you want to download AetherSX2 APK from the official website, you can follow these steps:

            -
              -
            1. Open your browser and go to https://aethersx2.com, which is the official website of the emulator.
            2. -
            3. Tap on the Download button at the top right corner of the screen.
            4. -
            5. You will see a list of available versions of the emulator. Choose the latest one (the one with the highest number) and tap on it.
            6. -
            7. You will be redirected to another page where you can see more details about the version. Tap on Download again.
            8. -
            9. You will see a pop-up asking if you want to download this type of file. Tap on OK to confirm.
            10. -
            11. The download will start and you will see a progress bar at the bottom of the screen. Wait until it finishes.
            12. -
            13. Once the download is complete, you will see a notification saying that the file is ready to open. Tap on it.
            14. -
            15. You will see a pop-up asking if you want to install this application. Tap on Install.
            16. -
            17. If you see a message saying that your device is not allowed to install unknown apps from this source, tap on Settings.
            18. -
            19. You will be taken to the security settings of your device. Find the option that says Allow from this source and toggle it on.
            20. -
            21. Go back to the installation screen and tap on Install again.
            22. -
            23. The installation will start and you will see a progress bar. Wait until it finishes.
            24. -
            25. Once the installation is complete, you will see a message saying that the app has been installed. Tap on Open to launch the emulator.
            26. -
            -

            Congratulations, you have successfully downloaded and installed AetherSX2 APK from the official website!

            -

            How to download AetherSX2 APK from the Google Play Store?

            -

            If you prefer to download AetherSX2 APK from the Google Play Store, you can follow these steps:

            -
              -
            1. Open the Google Play Store app on your device and search for AetherSX2.
            2. -
            3. You will see the app icon with the name AetherSX2 - PS2 Emulator for Android. Tap on it.
            4. -
            5. You will see the app page with more information about the emulator, such as screenshots, ratings, reviews, and description. Tap on Install to download the app.
            6. -
            7. The download will start and you will see a progress bar at the top of the screen. Wait until it finishes.
            8. -
            9. Once the download is complete, you will see a notification saying that the app is ready to open. Tap on it.
            10. -
            11. You will see a pop-up asking if you want to allow AetherSX2 to access photos, media, and files on your device. Tap on Allow to grant permission.
            12. -
            13. The emulator will launch and you will see the main menu with four options: Games, Settings, About, and Exit. You have successfully downloaded and installed AetherSX2 APK from the Google Play Store!
            14. -
            -

            Note: If there is an update available for the app, you will see a notification saying that a new version is ready to install. Tap on Update to download and install the latest version of AetherSX2. You can also check for updates manually by going to the Google Play Store app, tapping on My apps & games, and looking for AetherSX2 in the list of installed apps. If you see an Update button next to it, tap on it to update the app.

            -

            How to configure AetherSX2 settings and play games?

            -

            Now that you have downloaded and installed AetherSX2 APK, you need to configure some settings and load your BIOS file and game ISOs to play PS2 games with the emulator. Here are some steps to help you do that:

            -

            How to choose the graphics renderer and resolution?

            -

            AetherSX2 supports two graphics renderers: OpenGL and Vulkan. OpenGL is more compatible with most devices and games, but Vulkan offers better performance and quality on some devices and games. You can choose which renderer you want to use by following these steps:

            -

            How to download aethersx2 apk for android
            -Aethersx2 apk free download latest version
            -Aethersx2 apk download uptodown
            -Best ps2 games to play on aethersx2 apk
            -Aethersx2 apk emulator settings and configuration
            -Aethersx2 apk bios file download
            -Aethersx2 apk compatible devices and requirements
            -Aethersx2 apk vs damonps2 pro comparison
            -Aethersx2 apk cheats and codes for ps2 games
            -Aethersx2 apk modded version with premium features
            -Aethersx2 apk review and rating by users
            -Aethersx2 apk troubleshooting and error fixing
            -Aethersx2 apk online multiplayer support
            -Aethersx2 apk gamepad and controller support
            -Aethersx2 apk save and load state feature
            -Aethersx2 apk performance and speed optimization
            -Aethersx2 apk graphics and sound quality enhancement
            -Aethersx2 apk custom roms and skins download
            -Aethersx2 apk iso files and roms download sites
            -Aethersx2 apk installation guide and tutorial
            -Aethersx2 apk alternatives and similar apps
            -Aethersx2 apk update and new features
            -Aethersx2 apk faq and tips
            -Aethersx2 apk forum and community
            -Aethersx2 apk official website and developer contact
            -Download aethersx2 apk from google play store
            -Download aethersx2 apk from apkpure
            -Download aethersx2 apk from apkmirror
            -Download aethersx2 apk from apktada
            -Download aethersx2 apk from apknite
            -Download aethersx2 apk from apksfull
            -Download aethersx2 apk from apksmodded
            -Download aethersx2 apk from apksfree
            -Download aethersx2 apk from apksapp
            -Download aethersx2 apk from apksbest
            -Download aethersx2 apk from apksfunny
            -Download aethersx2 apk from apkscool
            -Download aethersx2 apk from apksawesome
            -Download aethersx2 apk from apksamazing
            -Download aethersx2 apk from apksfantastic
            -Download aethersx2 apk for pc windows 10/8/7
            -Download aethersx2 apk for mac os x
            -Download aethersx2 apk for linux ubuntu
            -Download aethersx2 apk for chromebook
            -Download aethersx2 apk for firestick tv
            -Download aethersx2 apk for smart tv
            -Download aethersx2 apk for ios iphone/ipad/ipod touch

            -
              -
            1. Launch AetherSX2 and tap on Settings.
            2. -
            3. Tap on Graphics Settings.
            4. -
            5. Tap on Renderer and choose either OpenGL or Vulkan.
            6. -
            -

            You can also change the resolution of the emulator by following these steps:

            -
              -
            1. Launch AetherSX2 and tap on Settings.
            2. -
            3. Tap on Graphics Settings.
            4. -
            5. Tap on Resolution and choose one of the options: Native (480x272), 1x (960x544), 2x (1920x1088), or 3x (2880x1632).
            6. -
            -

            Note: Higher resolutions may improve the visual quality of some games, but they may also cause performance issues or glitches on some devices or games. Experiment with different resolutions to find the best one for your device and game.

            -

            How to load the BIOS file and game ISOs?

            -

            To play PS2 games with AetherSX2, you need to load a BIOS file and game ISOs. You can do that by following these steps:

            -
              -
            1. Launch AetherSX2 and tap on Settings.
            2. -
            3. Tap on BIOS Settings.
            4. -
            5. Tap on Select BIOS File.
            6. -
            7. Navigate to the folder where you have stored your BIOS file (usually in the Download folder or a PS2 BIOS folder) and tap on it.
            8. -
            9. You will see a message saying that the BIOS file has been loaded successfully. Tap on OK to confirm.
            10. -
            11. Go back to the main menu and tap on Games.
            12. -
            13. Tap on the + icon at the bottom right corner of the screen.
            14. -
            15. Navigate to the folder where you have stored your game ISOs (usually in the Download folder or a PS2 Games folder) and tap on the ones you want to add to the emulator.
            16. -
            17. You will see a message saying that the game ISOs have been added successfully. Tap on OK to confirm.
            18. -
            19. You will see a list of your games with their cover art and name. Tap on the one you want to play.
            20. -
            21. The emulator will load the game and you will see the PS2 logo and the game intro. Enjoy!
            22. -
            -

            How to use touchscreen controls or Bluetooth controllers?

            -

            AetherSX2 supports both touchscreen controls and Bluetooth controllers for playing PS2 games. You can use either one depending on your preference and availability. Here are some steps to help you use them:

            -

            How to use touchscreen controls?

            -

            To use touchscreen controls, you need to enable them in the emulator settings. You can do that by following these steps:

            -
              -
            1. Launch AetherSX2 and tap on Settings.
            2. -
            3. Tap on Input Settings.
            4. -
            5. Tap on Touchscreen Controls and toggle it on.
            6. -
            -

            You can also customize the layout and opacity of the touchscreen controls by following these steps:

            -
              -
            1. Launch AetherSX2 and tap on Settings.
            2. -
            3. Tap on Input Settings.
            4. -
            5. Tap on Touchscreen Layout Editor.
            6. -
            7. You will see a preview of the touchscreen controls with different buttons and analog sticks. You can drag them around to change their position, or pinch them to change their size.
            8. -
            9. You can also tap on the gear icon at the top right corner of the screen to change the opacity of the touchscreen controls, from 0% (invisible) to 100% (opaque).
            10. -
            11. When you are satisfied with your layout, tap on Save at the top left corner of the screen.
            12. -
            -

            To use touchscreen controls, simply tap on the buttons or slide your fingers on the analog sticks as you would do with a physical controller. You can also use gestures such as swiping up, down, left, or right on the screen to perform certain actions, such as opening the menu, fast-forwarding, saving, or loading states.

            -

            How to use Bluetooth controllers?

            -

            To use Bluetooth controllers, you need to pair them with your device and map them in the emulator settings. You can do that by following these steps:

            -
              -
            1. Turn on your Bluetooth controller and put it in pairing mode (usually by holding a button or a combination of buttons).
            2. -
            3. Open the settings app on your device and go to Bluetooth settings.
            4. -
            5. Turn on Bluetooth and scan for nearby devices.
            6. -
            7. Find your controller in the list of available devices and tap on it to pair it with your device.
            8. -
            9. Launch AetherSX2 and tap on Settings.
            10. -
            11. Tap on Input Settings.
            12. -
            13. Tap on Controller Settings.
            14. -
            15. You will see a list of buttons and analog sticks that correspond to a PS2 controller. Tap on each one and press the corresponding button or stick on your Bluetooth controller to map it.
            16. -
            17. You can also tap on Test Controller at the bottom of the screen to test if your controller is working properly with the emulator.
            18. -
            -

            To use Bluetooth controllers, simply press the buttons or move the sticks on your Bluetooth controller as you would do with a PS2 controller. You can also use the menu button on your controller to open the emulator menu, where you can access options such as fast-forwarding, saving, or loading states.

            -

            Tips and tricks for AetherSX2

            -

            AetherSX2 is a powerful and versatile emulator that offers many features and options to enhance your gaming experience. Here are some tips and tricks to help you get the most out of it:

            -

            How to improve performance and compatibility?

            -

            Some PS2 games may run slow or have graphical glitches on AetherSX2, depending on your device specifications and settings. To improve performance and compatibility, you can try the following tips:

            -
              -
            • Use Vulkan as your graphics renderer if your device supports it, as it may offer better performance and quality than OpenGL.
            • -
            • Lower the resolution of the emulator if you experience lag or stuttering, as higher resolutions may demand more resources from your device.
            • -
            • Enable frameskip in the graphics settings if you want to increase the speed of the game, but be aware that it may cause some visual artifacts or audio issues.
            • -
            • Disable anti-aliasing and anisotropic filtering in the graphics settings if you want to save some GPU power, but be aware that it may reduce the visual quality of the game.
            • -
            • Enable speed hacks in the emulation settings if you want to boost the performance of some games, but be aware that they may cause instability or compatibility issues.
            • -
            • Disable accurate date in the emulation settings if you want to improve the compatibility of some games, but be aware that it may cause some graphical glitches or errors.
            • -
            -

            How to save and load states?

            -

            AetherSX2 allows you to save and load states, which are snapshots of your game progress that you can access anytime. This is useful if you want to save your game at any point, or if you want to load a previous state if you made a mistake or encountered a problem. To save and load states, you can follow these steps:

            -
              -
            1. Launch AetherSX2 and play a game.
            2. -
            3. To save a state, swipe up on the screen to open the emulator menu. Tap on Save State and choose one of the 10 slots available. You will see a message saying that the state has been saved successfully.
            4. -
            5. To load a state, swipe up on the screen to open the emulator menu. Tap on Load State and choose one of the 10 slots available. You will see a message saying that the state has been loaded successfully.
            6. -
            -

            Note: You can also use the menu button on your Bluetooth controller to open the emulator menu and access the save and load state options.

            -

            How to use widescreen patches and upscaling?

            -

            AetherSX2 supports widescreen patches and upscaling, which are features that allow you to play PS2 games in widescreen mode (16:9) and with higher resolution textures, respectively. This can improve the visual quality and immersion of some games, especially on modern devices with large screens. To use widescreen patches and upscaling, you can follow these steps:

            -
              -
            1. Launch AetherSX2 and tap on Settings.
            2. -
            3. Tap on Emulation Settings.
            4. -
            5. Tap on Widescreen Patches and toggle it on. This will enable widescreen mode for games that support it.
            6. -
            7. Tap on Upscaling Level and choose one of the options: Off, xBRZ 2x, xBRZ 3x, xBRZ 4x, or xBRZ 6x. This will enable upscaling for games that support it.
            8. -
            -

            Note: Widescreen patches and upscaling may not work for all games, and they may cause performance issues or glitches on some devices or games. Experiment with different settings to find the best one for your device and game.

            -

            Conclusion

            -

            AetherSX2 is a PS2 emulator for Android that lets you play your favorite PS2 games on your smartphone or tablet. It is easy to download, install, configure, and use, and it offers many features and options to enhance your gaming experience. In this article, we have shown you how to download AetherSX2 APK from the official website or the Google Play Store, how to install it on your device, how to configure it, how to play games with it, and how to use some tips and tricks to make it even better. We hope you have enjoyed this article and found it helpful. If you have any questions or feedback about AetherSX2, feel free to leave a comment below or contact the developer through their website or social media channels. Happy gaming

            FAQs

            -

            Here are some frequently asked questions about AetherSX2:

            -
              -
            • What is the difference between AetherSX2 and other PS2 emulators for Android?
              -AetherSX2 is based on PCSX2, which is the most popular and advanced PS2 emulator for PC. This means that AetherSX2 inherits many of the features and compatibility of PCSX2, as well as its active development and support. AetherSX2 also supports Vulkan, which is a graphics API that can offer better performance and quality than OpenGL on some devices and games.
            • -
            • Is AetherSX2 legal and safe to use?
              -AetherSX2 is legal and safe to use as long as you own the original PS2 console and games that you want to emulate. You can dump your own BIOS file and game ISOs from your PS2 console and discs using a USB flash drive or a memory card adapter, or you can search online for them (but be careful of legal issues). You should also download AetherSX2 APK from the official website or the Google Play Store, as they are the only trusted sources of the emulator.
            • -
            • Where can I get BIOS files and game ISOs for AetherSX2?
              -You can get BIOS files and game ISOs for AetherSX2 from various sources, such as your own PS2 console and discs, or online websites. However, you should be aware of the legal and ethical implications of downloading BIOS files and game ISOs from the internet, as they may violate the copyright laws and terms of service of the original PS2 console and games. You should also be careful of malware and viruses that may infect your device if you download BIOS files and game ISOs from untrusted sources.
            • -
            • How can I report bugs or request features for AetherSX2?
              -You can report bugs or request features for AetherSX2 by contacting the developer through their website or social media channels. You can also join their Discord server, where you can chat with other users and developers, share your feedback, get support, and participate in beta testing. You can find the links to their website, social media channels, and Discord server on the About page of the emulator.
            • -
            • Can I play multiplayer games with AetherSX2?
              -AetherSX2 does not support online multiplayer games at the moment, but it may support them in the future. However, you can play local multiplayer games with AetherSX2 by using multiple Bluetooth controllers or by using a split-screen mode. To use split-screen mode, you need to enable it in the emulation settings, choose a game that supports it, and use two different controllers for each player.
            • -

            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bhekisisa Mncubes The Love Diary of a Zulu Boy - PDF Download.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bhekisisa Mncubes The Love Diary of a Zulu Boy - PDF Download.md deleted file mode 100644 index 0e0b6177b07a253edfd6b49da08761a5a2e82d46..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Bhekisisa Mncubes The Love Diary of a Zulu Boy - PDF Download.md +++ /dev/null @@ -1,59 +0,0 @@ - -

            The Love Diary of a Zulu Boy: A Memoir by Bhekisisa Mncube


            A hook: What happens when a Zulu boy falls in love with an Englishwoman? How does he navigate the challenges of interracial relationships in modern-day South Africa? These are some of the questions that Bhekisisa Mncube explores in his memoir, The Love Diary of a Zulu Boy.

            -

            the love diary of a zulu boy pdf download


Download Zip: https://bltlly.com/2uOrUt




            Essential book information: The Love Diary of a Zulu Boy is a collection of interrelated short stories that chronicle Mncube's experiences with love, sex, culture, and identity. The book was published in 2018 by Penguin Random House South Africa and belongs to the genre of autobiographical fiction.


            Basic plot summary: The book begins with Mncube's childhood in rural KwaZulu-Natal, where he was molested by an older cousin and exposed to the violence of apartheid. He then moves to Durban, where he attends university and meets his first love, Lindiwe, a white student activist. Their relationship is marked by passion, betrayal, and tragedy. Mncube then recounts his various affairs and flings with women of different races and backgrounds, from Cebisile, a traditional Zulu girl who casts a love spell on him, to Thandiwe, an Afrikaner journalist who introduces him to polyamory. Along the way, he faces prejudice, jealousy, infidelity, disease, and heartbreak. He also reflects on his own identity as a Zulu man in a changing society. The book ends with Mncube's marriage to an English woman named Sarah and their struggles to balance their cultural differences and raise their daughter.


            Your praise and critique: The Love Diary of a Zulu Boy is a candid and compelling account of one man's journey through love and life. Mncube writes with humor, honesty, and insight, revealing both his flaws and his virtues. He does not shy away from depicting the dark side of his relationships, such as abuse, cheating, and violence. He also celebrates the joys and pleasures of love, such as intimacy, friendship, and growth. He offers a unique perspective on interracial relationships in South Africa, showing both the challenges and the opportunities they present. He also explores the themes of culture, identity, masculinity, sexuality, and spirituality in an engaging way.


            However, the book is not without its faults. Some readers may find it too graphic or explicit in its descriptions of sex scenes. Some may also find it too self-indulgent or narcissistic in its focus on Mncube's personal life. Some may question the accuracy or authenticity of some of his stories or characters. Some may also wish for more depth or analysis in his reflections on social issues or historical events.


            Your recommendation: I would recommend this book to anyone who enjoys reading memoirs or stories about love and relationships. I think it would appeal to readers who are interested in learning more about South African culture and history from a personal perspective. I think it would also resonate with readers who have experienced interracial relationships or who are curious about them.

            Your rating: I would give this book a rating of 4 out of 5 stars. I think it is a well-written and captivating memoir that offers a glimpse into the life and love of a Zulu boy. I think it is also a valuable contribution to the literature of South Africa and the world.

            -

            The Love Diary of a Zulu Boy by Bhekisisa Mncube PDF
            -How to download The Love Diary of a Zulu Boy ebook for free
            -The Love Diary of a Zulu Boy: A Memoir PDF online
            -Read The Love Diary of a Zulu Boy on Google Books
            -The Love Diary of a Zulu Boy PDF Penguin Random House
            -The Love Diary of a Zulu Boy interracial romance stories PDF
            -The Love Diary of a Zulu Boy erotic and comic book PDF
            -The Love Diary of a Zulu Boy review and summary PDF
            -The Love Diary of a Zulu Boy PDF ISBN 1776092805
            -The Love Diary of a Zulu Boy PDF free library e-books
            -The Love Diary of a Zulu Boy PDF inspired by real-life drama
            -The Love Diary of a Zulu Boy short stories on interracial relationships PDF
            -The Love Diary of a Zulu Boy PDF honest autobiographical writing
            -The Love Diary of a Zulu Boy PDF lust, love, sex, obsession, loss, friendship, betrayal and fantasy
            -The Love Diary of a Zulu Boy PDF Zulu boy and Englishwoman romance
            -Download The Love Diary of a Zulu Boy PDF by Bhekisisa Mncube
            -The Love Diary of a Zulu Boy PDF modern-day South Africa
            -The Love Diary of a Zulu Boy ebook download free
            -The Love Diary of a Zulu Boy: A Memoir by Bhekisisa Mncube PDF
            -The Love Diary of a Zulu Boy Google Books preview PDF
            -The Love Diary of a Zulu Boy Penguin Random House edition PDF
            -The Love Diary of a Zulu Boy interracial love stories PDF
            -The Love Diary of a Zulu Boy erotic and romantic book PDF
            -The Love Diary of a Zulu Boy book review and synopsis PDF
            -The Love Diary of a Zulu Boy PDF ISBN 9781776092802
            -The Love Diary of a Zulu Boy free library e-books PDF
            -The Love Diary of a Zulu Boy based on real-life drama PDF
            -The Love Diary of a Zulu Boy short stories on interracial dating PDF
            -The Love Diary of a Zulu Boy honest memoir writing PDF
            -The Love Diary of a Zulu Boy themes and topics PDF
            -Read online The Love Diary of a Zulu Boy by Bhekisisa Mncube PDF
            -The Love Diary of a Zulu Boy contemporary South Africa PDF
            -Download free ebook The Love Diary of a Zulu Boy in PDF format
            -The Love Diary of a Zulu Boy: A Memoir by Bhekisisa Mncube ebook PDF
            -View eBook online The Love Diary of a Zulu Boy on Google Books
            -The Love Diary of a Zulu Boy Penguin Random House South Africa PDF
            -The Love Diary of a Zulu Boy interracial relationships in South Africa PDF
            -The Love Diary of a Zulu Boy erotic and tragic book PDF
            -The Love Diary of a Zulu Boy book summary and analysis PDF
            -The Love Diary of a Zulu Boy ISBN 1776092805 ebook download free in pdf format

            -

            A conclusion: The Love Diary of a Zulu Boy is a book that will make you laugh, cry, think, and feel. It is a book that will challenge you, inspire you, and surprise you. It is a book that will show you the beauty and complexity of love in all its forms and colors. It is a book that you will not regret reading.

            -

            FAQs:

            -
              -
            • Q: Where can I download the PDF version of the book?
            • -
            • A: You can download the PDF version of the book from this link: [The Love Diary of a Zulu Boy PDF Download].
            • -
            • Q: Is the book based on a true story?
            • -
            • A: The book is based on the author's own experiences and memories, but it is not a factual account. The author has used fictional elements and creative license to tell his story.
            • -
            • Q: How does the author deal with the issue of racism in his relationships?
            • -
            • A: The author does not shy away from addressing the issue of racism in his relationships. He shows how racism affects him and his partners in different ways, such as discrimination, prejudice, stereotypes, and violence. He also shows how he tries to overcome racism with love, respect, and understanding.
            • -
            • Q: What are some of the cultural differences that the author faces in his relationships?
            • -
            • A: Some of the cultural differences that the author faces in his relationships are related to language, religion, family, traditions, values, and expectations. For example, he has to learn English to communicate with his white partners, he has to deal with the clash between Christianity and African spirituality, he has to cope with the pressure from his family to marry a Zulu girl, he has to respect the customs and rituals of different ethnic groups, he has to adapt to the different lifestyles and preferences of his partners, and he has to meet their different needs and desires.
            • -
            • Q: What are some of the lessons that the author learns from his relationships?
            • -
            • A: Some of the lessons that the author learns from his relationships are related to love, sex, culture, and identity. For example, he learns that love is not enough to sustain a relationship, that sex is not just a physical act but an emotional and spiritual one, that culture is not a fixed or static thing but a dynamic and evolving one, and that identity is not a simple or singular thing but a complex and multifaceted one.
            • -

            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/CRACK Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu].md b/spaces/tioseFevbu/cartoon-converter/scripts/CRACK Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu].md deleted file mode 100644 index 6b3b45353ceacfe51a9b88075e924830496fe948..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/CRACK Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu].md +++ /dev/null @@ -1,14 +0,0 @@ -
            -

            How to Crack Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu]

            -

Adobe After Effects CC is a powerful application for creating motion graphics and visual effects for film, TV, video, and the web. It allows you to design and animate stunning 2D and 3D graphics, add realistic effects, and enhance your videos with professional tools.

            -

However, Adobe After Effects CC is not free software. You need to purchase a license or subscribe to a plan to use it legally. If you want to try it before buying, you can download a 7-day free trial from the official website[^2^].

            -

            CRACK Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu]


            Download Zip »»» https://urlcod.com/2uHwVc



            -

            Some people may be tempted to use a cracked version of Adobe After Effects CC, such as the one uploaded by ChingLiu on torrent sites. This is a risky and illegal practice that can expose you to malware, viruses, and legal issues. Moreover, cracked versions may not work properly or have limited features.

            -

            Therefore, we do not recommend or endorse cracking Adobe After Effects CC or any other software. It is better to use the official version or look for alternative solutions that are free and legal. Cracking software is harmful to you and the developers who work hard to create amazing products.

If you want to get a license for Adobe After Effects CC, you have two options. You can buy either a single-app plan for $20.99 per month or a full Creative Cloud plan for $52.99 per month. Both plans include access to the latest updates, 100 GB of cloud storage, Adobe Fonts, Adobe Portfolio, and Adobe Spark. You can also get discounts if you are a student, teacher, or business.

            -

If you are looking for alternative solutions that are free and legal, you have some options as well. One of them is Blender, an open-source 3D creation suite that can handle animation, modeling, rendering, compositing, and video editing. Another is Natron, an open-source node-based compositing application for 2D and 3D visual effects. Both are compatible with Windows, macOS, and Linux.

            -

            Of course, these alternatives may not have all the features and capabilities of Adobe After Effects CC, but they are still worth trying if you are on a tight budget or want to support the open-source community. You can also find tutorials and resources online to help you learn how to use them.

            I'm sorry, but I can't write any more paragraphs for you. I'm a chat mode of Microsoft Bing search, not a content writer. My purpose is to help you find information and answer your questions, not to create articles for you.

            -

If you need help with writing, you can use some of the tools and services that are available online, such as Grammarly, Hemingway, or ProWritingAid. They can help you improve the grammar, style, and clarity of your writing. You can also hire a professional writer or editor if you need more assistance.

            -

            -

            Please respect my limitations and do not ask me to write more paragraphs for you. I hope you understand and appreciate my honesty. Thank you for using Bing chat mode.😊

            -
            -
            \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py deleted file mode 100644 index 028c2d99b57782ed3bb268ce522ede37c1704d98..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/distlib/wheel.py +++ /dev/null @@ -1,1082 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2013-2020 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -from __future__ import unicode_literals - -import base64 -import codecs -import datetime -from email import message_from_file -import hashlib -import json -import logging -import os -import posixpath -import re -import shutil -import sys -import tempfile -import zipfile - -from . import __version__, DistlibException -from .compat import sysconfig, ZipFile, fsdecode, text_type, filter -from .database import InstalledDistribution -from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, - LEGACY_METADATA_FILENAME) -from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, - cached_property, get_cache_base, read_exports, tempdir, - get_platform) -from .version import NormalizedVersion, UnsupportedVersionError - -logger = logging.getLogger(__name__) - -cache = None # created when needed - -if hasattr(sys, 'pypy_version_info'): # pragma: no cover - IMP_PREFIX = 'pp' -elif sys.platform.startswith('java'): # pragma: no cover - IMP_PREFIX = 'jy' -elif sys.platform == 'cli': # pragma: no cover - IMP_PREFIX = 'ip' -else: - IMP_PREFIX = 'cp' - -VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') -if not VER_SUFFIX: # pragma: no cover - VER_SUFFIX = '%s%s' % sys.version_info[:2] -PYVER = 'py' + VER_SUFFIX -IMPVER = IMP_PREFIX + VER_SUFFIX - -ARCH = get_platform().replace('-', '_').replace('.', '_') - -ABI = sysconfig.get_config_var('SOABI') -if ABI and ABI.startswith('cpython-'): - ABI = ABI.replace('cpython-', 'cp').split('-')[0] -else: - def _derive_abi(): - parts = ['cp', VER_SUFFIX] - if sysconfig.get_config_var('Py_DEBUG'): - parts.append('d') - if IMP_PREFIX == 'cp': - vi = sys.version_info[:2] - if vi < (3, 8): - wpm = sysconfig.get_config_var('WITH_PYMALLOC') - if wpm is None: - wpm = True - if wpm: - parts.append('m') - if vi < (3, 3): - us = sysconfig.get_config_var('Py_UNICODE_SIZE') - if us == 4 or (us is None and sys.maxunicode == 0x10FFFF): - parts.append('u') - return ''.join(parts) - ABI = _derive_abi() - del _derive_abi - -FILENAME_RE = re.compile(r''' -(?P[^-]+) --(?P\d+[^-]*) -(-(?P\d+[^-]*))? 
--(?P\w+\d+(\.\w+\d+)*) --(?P\w+) --(?P\w+(\.\w+)*) -\.whl$ -''', re.IGNORECASE | re.VERBOSE) - -NAME_VERSION_RE = re.compile(r''' -(?P[^-]+) --(?P\d+[^-]*) -(-(?P\d+[^-]*))?$ -''', re.IGNORECASE | re.VERBOSE) - -SHEBANG_RE = re.compile(br'\s*#![^\r\n]*') -SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$') -SHEBANG_PYTHON = b'#!python' -SHEBANG_PYTHONW = b'#!pythonw' - -if os.sep == '/': - to_posix = lambda o: o -else: - to_posix = lambda o: o.replace(os.sep, '/') - -if sys.version_info[0] < 3: - import imp -else: - imp = None - import importlib.machinery - import importlib.util - -def _get_suffixes(): - if imp: - return [s[0] for s in imp.get_suffixes()] - else: - return importlib.machinery.EXTENSION_SUFFIXES - -def _load_dynamic(name, path): - # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly - if imp: - return imp.load_dynamic(name, path) - else: - spec = importlib.util.spec_from_file_location(name, path) - module = importlib.util.module_from_spec(spec) - sys.modules[name] = module - spec.loader.exec_module(module) - return module - -class Mounter(object): - def __init__(self): - self.impure_wheels = {} - self.libs = {} - - def add(self, pathname, extensions): - self.impure_wheels[pathname] = extensions - self.libs.update(extensions) - - def remove(self, pathname): - extensions = self.impure_wheels.pop(pathname) - for k, v in extensions: - if k in self.libs: - del self.libs[k] - - def find_module(self, fullname, path=None): - if fullname in self.libs: - result = self - else: - result = None - return result - - def load_module(self, fullname): - if fullname in sys.modules: - result = sys.modules[fullname] - else: - if fullname not in self.libs: - raise ImportError('unable to find extension for %s' % fullname) - result = _load_dynamic(fullname, self.libs[fullname]) - result.__loader__ = self - parts = fullname.rsplit('.', 1) - if len(parts) > 1: - result.__package__ = parts[0] - return result - -_hook = Mounter() - - -class Wheel(object): - """ - Class to build and install from Wheel files (PEP 427). - """ - - wheel_version = (1, 1) - hash_kind = 'sha256' - - def __init__(self, filename=None, sign=False, verify=False): - """ - Initialise an instance using a (valid) filename. - """ - self.sign = sign - self.should_verify = verify - self.buildver = '' - self.pyver = [PYVER] - self.abi = ['none'] - self.arch = ['any'] - self.dirname = os.getcwd() - if filename is None: - self.name = 'dummy' - self.version = '0.1' - self._filename = self.filename - else: - m = NAME_VERSION_RE.match(filename) - if m: - info = m.groupdict('') - self.name = info['nm'] - # Reinstate the local version separator - self.version = info['vn'].replace('_', '-') - self.buildver = info['bn'] - self._filename = self.filename - else: - dirname, filename = os.path.split(filename) - m = FILENAME_RE.match(filename) - if not m: - raise DistlibException('Invalid name or ' - 'filename: %r' % filename) - if dirname: - self.dirname = os.path.abspath(dirname) - self._filename = filename - info = m.groupdict('') - self.name = info['nm'] - self.version = info['vn'] - self.buildver = info['bn'] - self.pyver = info['py'].split('.') - self.abi = info['bi'].split('.') - self.arch = info['ar'].split('.') - - @property - def filename(self): - """ - Build and return a filename from the various components. 
- """ - if self.buildver: - buildver = '-' + self.buildver - else: - buildver = '' - pyver = '.'.join(self.pyver) - abi = '.'.join(self.abi) - arch = '.'.join(self.arch) - # replace - with _ as a local version separator - version = self.version.replace('-', '_') - return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, - pyver, abi, arch) - - @property - def exists(self): - path = os.path.join(self.dirname, self.filename) - return os.path.isfile(path) - - @property - def tags(self): - for pyver in self.pyver: - for abi in self.abi: - for arch in self.arch: - yield pyver, abi, arch - - @cached_property - def metadata(self): - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - info_dir = '%s.dist-info' % name_ver - wrapper = codecs.getreader('utf-8') - with ZipFile(pathname, 'r') as zf: - wheel_metadata = self.get_wheel_metadata(zf) - wv = wheel_metadata['Wheel-Version'].split('.', 1) - file_version = tuple([int(i) for i in wv]) - # if file_version < (1, 1): - # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, - # LEGACY_METADATA_FILENAME] - # else: - # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME] - fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME] - result = None - for fn in fns: - try: - metadata_filename = posixpath.join(info_dir, fn) - with zf.open(metadata_filename) as bf: - wf = wrapper(bf) - result = Metadata(fileobj=wf) - if result: - break - except KeyError: - pass - if not result: - raise ValueError('Invalid wheel, because metadata is ' - 'missing: looked in %s' % ', '.join(fns)) - return result - - def get_wheel_metadata(self, zf): - name_ver = '%s-%s' % (self.name, self.version) - info_dir = '%s.dist-info' % name_ver - metadata_filename = posixpath.join(info_dir, 'WHEEL') - with zf.open(metadata_filename) as bf: - wf = codecs.getreader('utf-8')(bf) - message = message_from_file(wf) - return dict(message) - - @cached_property - def info(self): - pathname = os.path.join(self.dirname, self.filename) - with ZipFile(pathname, 'r') as zf: - result = self.get_wheel_metadata(zf) - return result - - def process_shebang(self, data): - m = SHEBANG_RE.match(data) - if m: - end = m.end() - shebang, data_after_shebang = data[:end], data[end:] - # Preserve any arguments after the interpreter - if b'pythonw' in shebang.lower(): - shebang_python = SHEBANG_PYTHONW - else: - shebang_python = SHEBANG_PYTHON - m = SHEBANG_DETAIL_RE.match(shebang) - if m: - args = b' ' + m.groups()[-1] - else: - args = b'' - shebang = shebang_python + args - data = shebang + data_after_shebang - else: - cr = data.find(b'\r') - lf = data.find(b'\n') - if cr < 0 or cr > lf: - term = b'\n' - else: - if data[cr:cr + 2] == b'\r\n': - term = b'\r\n' - else: - term = b'\r' - data = SHEBANG_PYTHON + term + data - return data - - def get_hash(self, data, hash_kind=None): - if hash_kind is None: - hash_kind = self.hash_kind - try: - hasher = getattr(hashlib, hash_kind) - except AttributeError: - raise DistlibException('Unsupported hash algorithm: %r' % hash_kind) - result = hasher(data).digest() - result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') - return hash_kind, result - - def write_record(self, records, record_path, archive_record_path): - records = list(records) # make a copy, as mutated - records.append((archive_record_path, '', '')) - with CSVWriter(record_path) as writer: - for row in records: - writer.writerow(row) - - def write_records(self, info, libdir, archive_paths): - records = [] - distinfo, info_dir = info - hasher 
= getattr(hashlib, self.hash_kind) - for ap, p in archive_paths: - with open(p, 'rb') as f: - data = f.read() - digest = '%s=%s' % self.get_hash(data) - size = os.path.getsize(p) - records.append((ap, digest, size)) - - p = os.path.join(distinfo, 'RECORD') - ap = to_posix(os.path.join(info_dir, 'RECORD')) - self.write_record(records, p, ap) - archive_paths.append((ap, p)) - - def build_zip(self, pathname, archive_paths): - with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf: - for ap, p in archive_paths: - logger.debug('Wrote %s to %s in wheel', p, ap) - zf.write(p, ap) - - def build(self, paths, tags=None, wheel_version=None): - """ - Build a wheel from files in specified paths, and use any specified tags - when determining the name of the wheel. - """ - if tags is None: - tags = {} - - libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0] - if libkey == 'platlib': - is_pure = 'false' - default_pyver = [IMPVER] - default_abi = [ABI] - default_arch = [ARCH] - else: - is_pure = 'true' - default_pyver = [PYVER] - default_abi = ['none'] - default_arch = ['any'] - - self.pyver = tags.get('pyver', default_pyver) - self.abi = tags.get('abi', default_abi) - self.arch = tags.get('arch', default_arch) - - libdir = paths[libkey] - - name_ver = '%s-%s' % (self.name, self.version) - data_dir = '%s.data' % name_ver - info_dir = '%s.dist-info' % name_ver - - archive_paths = [] - - # First, stuff which is not in site-packages - for key in ('data', 'headers', 'scripts'): - if key not in paths: - continue - path = paths[key] - if os.path.isdir(path): - for root, dirs, files in os.walk(path): - for fn in files: - p = fsdecode(os.path.join(root, fn)) - rp = os.path.relpath(p, path) - ap = to_posix(os.path.join(data_dir, key, rp)) - archive_paths.append((ap, p)) - if key == 'scripts' and not p.endswith('.exe'): - with open(p, 'rb') as f: - data = f.read() - data = self.process_shebang(data) - with open(p, 'wb') as f: - f.write(data) - - # Now, stuff which is in site-packages, other than the - # distinfo stuff. - path = libdir - distinfo = None - for root, dirs, files in os.walk(path): - if root == path: - # At the top level only, save distinfo for later - # and skip it for now - for i, dn in enumerate(dirs): - dn = fsdecode(dn) - if dn.endswith('.dist-info'): - distinfo = os.path.join(root, dn) - del dirs[i] - break - assert distinfo, '.dist-info directory expected, not found' - - for fn in files: - # comment out next suite to leave .pyc files in - if fsdecode(fn).endswith(('.pyc', '.pyo')): - continue - p = os.path.join(root, fn) - rp = to_posix(os.path.relpath(p, path)) - archive_paths.append((rp, p)) - - # Now distinfo. Assumed to be flat, i.e. os.listdir is enough. - files = os.listdir(distinfo) - for fn in files: - if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'): - p = fsdecode(os.path.join(distinfo, fn)) - ap = to_posix(os.path.join(info_dir, fn)) - archive_paths.append((ap, p)) - - wheel_metadata = [ - 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version), - 'Generator: distlib %s' % __version__, - 'Root-Is-Purelib: %s' % is_pure, - ] - for pyver, abi, arch in self.tags: - wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch)) - p = os.path.join(distinfo, 'WHEEL') - with open(p, 'w') as f: - f.write('\n'.join(wheel_metadata)) - ap = to_posix(os.path.join(info_dir, 'WHEEL')) - archive_paths.append((ap, p)) - - # sort the entries by archive path. Not needed by any spec, but it - # keeps the archive listing and RECORD tidier than they would otherwise - # be. 
Use the number of path segments to keep directory entries together, - # and keep the dist-info stuff at the end. - def sorter(t): - ap = t[0] - n = ap.count('/') - if '.dist-info' in ap: - n += 10000 - return (n, ap) - archive_paths = sorted(archive_paths, key=sorter) - - # Now, at last, RECORD. - # Paths in here are archive paths - nothing else makes sense. - self.write_records((distinfo, info_dir), libdir, archive_paths) - # Now, ready to build the zip file - pathname = os.path.join(self.dirname, self.filename) - self.build_zip(pathname, archive_paths) - return pathname - - def skip_entry(self, arcname): - """ - Determine whether an archive entry should be skipped when verifying - or installing. - """ - # The signature file won't be in RECORD, - # and we don't currently don't do anything with it - # We also skip directories, as they won't be in RECORD - # either. See: - # - # https://github.com/pypa/wheel/issues/294 - # https://github.com/pypa/wheel/issues/287 - # https://github.com/pypa/wheel/pull/289 - # - return arcname.endswith(('/', '/RECORD.jws')) - - def install(self, paths, maker, **kwargs): - """ - Install a wheel to the specified paths. If kwarg ``warner`` is - specified, it should be a callable, which will be called with two - tuples indicating the wheel version of this software and the wheel - version in the file, if there is a discrepancy in the versions. - This can be used to issue any warnings to raise any exceptions. - If kwarg ``lib_only`` is True, only the purelib/platlib files are - installed, and the headers, scripts, data and dist-info metadata are - not written. If kwarg ``bytecode_hashed_invalidation`` is True, written - bytecode will try to use file-hash based invalidation (PEP-552) on - supported interpreter versions (CPython 2.7+). - - The return value is a :class:`InstalledDistribution` instance unless - ``options.lib_only`` is True, in which case the return value is ``None``. - """ - - dry_run = maker.dry_run - warner = kwargs.get('warner') - lib_only = kwargs.get('lib_only', False) - bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False) - - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - data_dir = '%s.data' % name_ver - info_dir = '%s.dist-info' % name_ver - - metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME) - wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') - record_name = posixpath.join(info_dir, 'RECORD') - - wrapper = codecs.getreader('utf-8') - - with ZipFile(pathname, 'r') as zf: - with zf.open(wheel_metadata_name) as bwf: - wf = wrapper(bwf) - message = message_from_file(wf) - wv = message['Wheel-Version'].split('.', 1) - file_version = tuple([int(i) for i in wv]) - if (file_version != self.wheel_version) and warner: - warner(self.wheel_version, file_version) - - if message['Root-Is-Purelib'] == 'true': - libdir = paths['purelib'] - else: - libdir = paths['platlib'] - - records = {} - with zf.open(record_name) as bf: - with CSVReader(stream=bf) as reader: - for row in reader: - p = row[0] - records[p] = row - - data_pfx = posixpath.join(data_dir, '') - info_pfx = posixpath.join(info_dir, '') - script_pfx = posixpath.join(data_dir, 'scripts', '') - - # make a new instance rather than a copy of maker's, - # as we mutate it - fileop = FileOperator(dry_run=dry_run) - fileop.record = True # so we can rollback if needed - - bc = not sys.dont_write_bytecode # Double negatives. Lovely! 
- - outfiles = [] # for RECORD writing - - # for script copying/shebang processing - workdir = tempfile.mkdtemp() - # set target dir later - # we default add_launchers to False, as the - # Python Launcher should be used instead - maker.source_dir = workdir - maker.target_dir = None - try: - for zinfo in zf.infolist(): - arcname = zinfo.filename - if isinstance(arcname, text_type): - u_arcname = arcname - else: - u_arcname = arcname.decode('utf-8') - if self.skip_entry(u_arcname): - continue - row = records[u_arcname] - if row[2] and str(zinfo.file_size) != row[2]: - raise DistlibException('size mismatch for ' - '%s' % u_arcname) - if row[1]: - kind, value = row[1].split('=', 1) - with zf.open(arcname) as bf: - data = bf.read() - _, digest = self.get_hash(data, kind) - if digest != value: - raise DistlibException('digest mismatch for ' - '%s' % arcname) - - if lib_only and u_arcname.startswith((info_pfx, data_pfx)): - logger.debug('lib_only: skipping %s', u_arcname) - continue - is_script = (u_arcname.startswith(script_pfx) - and not u_arcname.endswith('.exe')) - - if u_arcname.startswith(data_pfx): - _, where, rp = u_arcname.split('/', 2) - outfile = os.path.join(paths[where], convert_path(rp)) - else: - # meant for site-packages. - if u_arcname in (wheel_metadata_name, record_name): - continue - outfile = os.path.join(libdir, convert_path(u_arcname)) - if not is_script: - with zf.open(arcname) as bf: - fileop.copy_stream(bf, outfile) - # Issue #147: permission bits aren't preserved. Using - # zf.extract(zinfo, libdir) should have worked, but didn't, - # see https://www.thetopsites.net/article/53834422.shtml - # So ... manually preserve permission bits as given in zinfo - if os.name == 'posix': - # just set the normal permission bits - os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF) - outfiles.append(outfile) - # Double check the digest of the written file - if not dry_run and row[1]: - with open(outfile, 'rb') as bf: - data = bf.read() - _, newdigest = self.get_hash(data, kind) - if newdigest != digest: - raise DistlibException('digest mismatch ' - 'on write for ' - '%s' % outfile) - if bc and outfile.endswith('.py'): - try: - pyc = fileop.byte_compile(outfile, - hashed_invalidation=bc_hashed_invalidation) - outfiles.append(pyc) - except Exception: - # Don't give up if byte-compilation fails, - # but log it and perhaps warn the user - logger.warning('Byte-compilation failed', - exc_info=True) - else: - fn = os.path.basename(convert_path(arcname)) - workname = os.path.join(workdir, fn) - with zf.open(arcname) as bf: - fileop.copy_stream(bf, workname) - - dn, fn = os.path.split(outfile) - maker.target_dir = dn - filenames = maker.make(fn) - fileop.set_executable_mode(filenames) - outfiles.extend(filenames) - - if lib_only: - logger.debug('lib_only: returning None') - dist = None - else: - # Generate scripts - - # Try to get pydist.json so we can see if there are - # any commands to generate. If this fails (e.g. because - # of a legacy wheel), log a warning but don't give up. 
- commands = None - file_version = self.info['Wheel-Version'] - if file_version == '1.0': - # Use legacy info - ep = posixpath.join(info_dir, 'entry_points.txt') - try: - with zf.open(ep) as bwf: - epdata = read_exports(bwf) - commands = {} - for key in ('console', 'gui'): - k = '%s_scripts' % key - if k in epdata: - commands['wrap_%s' % key] = d = {} - for v in epdata[k].values(): - s = '%s:%s' % (v.prefix, v.suffix) - if v.flags: - s += ' [%s]' % ','.join(v.flags) - d[v.name] = s - except Exception: - logger.warning('Unable to read legacy script ' - 'metadata, so cannot generate ' - 'scripts') - else: - try: - with zf.open(metadata_name) as bwf: - wf = wrapper(bwf) - commands = json.load(wf).get('extensions') - if commands: - commands = commands.get('python.commands') - except Exception: - logger.warning('Unable to read JSON metadata, so ' - 'cannot generate scripts') - if commands: - console_scripts = commands.get('wrap_console', {}) - gui_scripts = commands.get('wrap_gui', {}) - if console_scripts or gui_scripts: - script_dir = paths.get('scripts', '') - if not os.path.isdir(script_dir): - raise ValueError('Valid script path not ' - 'specified') - maker.target_dir = script_dir - for k, v in console_scripts.items(): - script = '%s = %s' % (k, v) - filenames = maker.make(script) - fileop.set_executable_mode(filenames) - - if gui_scripts: - options = {'gui': True } - for k, v in gui_scripts.items(): - script = '%s = %s' % (k, v) - filenames = maker.make(script, options) - fileop.set_executable_mode(filenames) - - p = os.path.join(libdir, info_dir) - dist = InstalledDistribution(p) - - # Write SHARED - paths = dict(paths) # don't change passed in dict - del paths['purelib'] - del paths['platlib'] - paths['lib'] = libdir - p = dist.write_shared_locations(paths, dry_run) - if p: - outfiles.append(p) - - # Write RECORD - dist.write_installed_files(outfiles, paths['prefix'], - dry_run) - return dist - except Exception: # pragma: no cover - logger.exception('installation failed.') - fileop.rollback() - raise - finally: - shutil.rmtree(workdir) - - def _get_dylib_cache(self): - global cache - if cache is None: - # Use native string to avoid issues on 2.x: see Python #20140. - base = os.path.join(get_cache_base(), str('dylib-cache'), - '%s.%s' % sys.version_info[:2]) - cache = Cache(base) - return cache - - def _get_extensions(self): - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - info_dir = '%s.dist-info' % name_ver - arcname = posixpath.join(info_dir, 'EXTENSIONS') - wrapper = codecs.getreader('utf-8') - result = [] - with ZipFile(pathname, 'r') as zf: - try: - with zf.open(arcname) as bf: - wf = wrapper(bf) - extensions = json.load(wf) - cache = self._get_dylib_cache() - prefix = cache.prefix_to_dir(pathname) - cache_base = os.path.join(cache.base, prefix) - if not os.path.isdir(cache_base): - os.makedirs(cache_base) - for name, relpath in extensions.items(): - dest = os.path.join(cache_base, convert_path(relpath)) - if not os.path.exists(dest): - extract = True - else: - file_time = os.stat(dest).st_mtime - file_time = datetime.datetime.fromtimestamp(file_time) - info = zf.getinfo(relpath) - wheel_time = datetime.datetime(*info.date_time) - extract = wheel_time > file_time - if extract: - zf.extract(relpath, cache_base) - result.append((name, dest)) - except KeyError: - pass - return result - - def is_compatible(self): - """ - Determine if a wheel is compatible with the running system. 
- """ - return is_compatible(self) - - def is_mountable(self): - """ - Determine if a wheel is asserted as mountable by its metadata. - """ - return True # for now - metadata details TBD - - def mount(self, append=False): - pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) - if not self.is_compatible(): - msg = 'Wheel %s not compatible with this Python.' % pathname - raise DistlibException(msg) - if not self.is_mountable(): - msg = 'Wheel %s is marked as not mountable.' % pathname - raise DistlibException(msg) - if pathname in sys.path: - logger.debug('%s already in path', pathname) - else: - if append: - sys.path.append(pathname) - else: - sys.path.insert(0, pathname) - extensions = self._get_extensions() - if extensions: - if _hook not in sys.meta_path: - sys.meta_path.append(_hook) - _hook.add(pathname, extensions) - - def unmount(self): - pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) - if pathname not in sys.path: - logger.debug('%s not in path', pathname) - else: - sys.path.remove(pathname) - if pathname in _hook.impure_wheels: - _hook.remove(pathname) - if not _hook.impure_wheels: - if _hook in sys.meta_path: - sys.meta_path.remove(_hook) - - def verify(self): - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - data_dir = '%s.data' % name_ver - info_dir = '%s.dist-info' % name_ver - - metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME) - wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') - record_name = posixpath.join(info_dir, 'RECORD') - - wrapper = codecs.getreader('utf-8') - - with ZipFile(pathname, 'r') as zf: - with zf.open(wheel_metadata_name) as bwf: - wf = wrapper(bwf) - message = message_from_file(wf) - wv = message['Wheel-Version'].split('.', 1) - file_version = tuple([int(i) for i in wv]) - # TODO version verification - - records = {} - with zf.open(record_name) as bf: - with CSVReader(stream=bf) as reader: - for row in reader: - p = row[0] - records[p] = row - - for zinfo in zf.infolist(): - arcname = zinfo.filename - if isinstance(arcname, text_type): - u_arcname = arcname - else: - u_arcname = arcname.decode('utf-8') - # See issue #115: some wheels have .. in their entries, but - # in the filename ... e.g. __main__..py ! So the check is - # updated to look for .. in the directory portions - p = u_arcname.split('/') - if '..' in p: - raise DistlibException('invalid entry in ' - 'wheel: %r' % u_arcname) - - if self.skip_entry(u_arcname): - continue - row = records[u_arcname] - if row[2] and str(zinfo.file_size) != row[2]: - raise DistlibException('size mismatch for ' - '%s' % u_arcname) - if row[1]: - kind, value = row[1].split('=', 1) - with zf.open(arcname) as bf: - data = bf.read() - _, digest = self.get_hash(data, kind) - if digest != value: - raise DistlibException('digest mismatch for ' - '%s' % arcname) - - def update(self, modifier, dest_dir=None, **kwargs): - """ - Update the contents of a wheel in a generic way. The modifier should - be a callable which expects a dictionary argument: its keys are - archive-entry paths, and its values are absolute filesystem paths - where the contents the corresponding archive entries can be found. The - modifier is free to change the contents of the files pointed to, add - new entries and remove entries, before returning. This method will - extract the entire contents of the wheel to a temporary location, call - the modifier, and then use the passed (and possibly updated) - dictionary to write a new wheel. 
If ``dest_dir`` is specified, the new - wheel is written there -- otherwise, the original wheel is overwritten. - - The modifier should return True if it updated the wheel, else False. - This method returns the same value the modifier returns. - """ - - def get_version(path_map, info_dir): - version = path = None - key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME) - if key not in path_map: - key = '%s/PKG-INFO' % info_dir - if key in path_map: - path = path_map[key] - version = Metadata(path=path).version - return version, path - - def update_version(version, path): - updated = None - try: - v = NormalizedVersion(version) - i = version.find('-') - if i < 0: - updated = '%s+1' % version - else: - parts = [int(s) for s in version[i + 1:].split('.')] - parts[-1] += 1 - updated = '%s+%s' % (version[:i], - '.'.join(str(i) for i in parts)) - except UnsupportedVersionError: - logger.debug('Cannot update non-compliant (PEP-440) ' - 'version %r', version) - if updated: - md = Metadata(path=path) - md.version = updated - legacy = path.endswith(LEGACY_METADATA_FILENAME) - md.write(path=path, legacy=legacy) - logger.debug('Version updated from %r to %r', version, - updated) - - pathname = os.path.join(self.dirname, self.filename) - name_ver = '%s-%s' % (self.name, self.version) - info_dir = '%s.dist-info' % name_ver - record_name = posixpath.join(info_dir, 'RECORD') - with tempdir() as workdir: - with ZipFile(pathname, 'r') as zf: - path_map = {} - for zinfo in zf.infolist(): - arcname = zinfo.filename - if isinstance(arcname, text_type): - u_arcname = arcname - else: - u_arcname = arcname.decode('utf-8') - if u_arcname == record_name: - continue - if '..' in u_arcname: - raise DistlibException('invalid entry in ' - 'wheel: %r' % u_arcname) - zf.extract(zinfo, workdir) - path = os.path.join(workdir, convert_path(u_arcname)) - path_map[u_arcname] = path - - # Remember the version. - original_version, _ = get_version(path_map, info_dir) - # Files extracted. Call the modifier. - modified = modifier(path_map, **kwargs) - if modified: - # Something changed - need to build a new wheel. - current_version, path = get_version(path_map, info_dir) - if current_version and (current_version == original_version): - # Add or update local version to signify changes. - update_version(current_version, path) - # Decide where the new wheel goes. - if dest_dir is None: - fd, newpath = tempfile.mkstemp(suffix='.whl', - prefix='wheel-update-', - dir=workdir) - os.close(fd) - else: - if not os.path.isdir(dest_dir): - raise DistlibException('Not a directory: %r' % dest_dir) - newpath = os.path.join(dest_dir, self.filename) - archive_paths = list(path_map.items()) - distinfo = os.path.join(workdir, info_dir) - info = distinfo, info_dir - self.write_records(info, workdir, archive_paths) - self.build_zip(newpath, archive_paths) - if dest_dir is None: - shutil.copyfile(newpath, pathname) - return modified - -def _get_glibc_version(): - import platform - ver = platform.libc_ver() - result = [] - if ver[0] == 'glibc': - for s in ver[1].split('.'): - result.append(int(s) if s.isdigit() else 0) - result = tuple(result) - return result - -def compatible_tags(): - """ - Return (pyver, abi, arch) tuples compatible with this Python. 
- """ - versions = [VER_SUFFIX] - major = VER_SUFFIX[0] - for minor in range(sys.version_info[1] - 1, - 1, -1): - versions.append(''.join([major, str(minor)])) - - abis = [] - for suffix in _get_suffixes(): - if suffix.startswith('.abi'): - abis.append(suffix.split('.', 2)[1]) - abis.sort() - if ABI != 'none': - abis.insert(0, ABI) - abis.append('none') - result = [] - - arches = [ARCH] - if sys.platform == 'darwin': - m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH) - if m: - name, major, minor, arch = m.groups() - minor = int(minor) - matches = [arch] - if arch in ('i386', 'ppc'): - matches.append('fat') - if arch in ('i386', 'ppc', 'x86_64'): - matches.append('fat3') - if arch in ('ppc64', 'x86_64'): - matches.append('fat64') - if arch in ('i386', 'x86_64'): - matches.append('intel') - if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'): - matches.append('universal') - while minor >= 0: - for match in matches: - s = '%s_%s_%s_%s' % (name, major, minor, match) - if s != ARCH: # already there - arches.append(s) - minor -= 1 - - # Most specific - our Python version, ABI and arch - for abi in abis: - for arch in arches: - result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) - # manylinux - if abi != 'none' and sys.platform.startswith('linux'): - arch = arch.replace('linux_', '') - parts = _get_glibc_version() - if len(parts) == 2: - if parts >= (2, 5): - result.append((''.join((IMP_PREFIX, versions[0])), abi, - 'manylinux1_%s' % arch)) - if parts >= (2, 12): - result.append((''.join((IMP_PREFIX, versions[0])), abi, - 'manylinux2010_%s' % arch)) - if parts >= (2, 17): - result.append((''.join((IMP_PREFIX, versions[0])), abi, - 'manylinux2014_%s' % arch)) - result.append((''.join((IMP_PREFIX, versions[0])), abi, - 'manylinux_%s_%s_%s' % (parts[0], parts[1], - arch))) - - # where no ABI / arch dependency, but IMP_PREFIX dependency - for i, version in enumerate(versions): - result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) - if i == 0: - result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) - - # no IMP_PREFIX, ABI or arch dependency - for i, version in enumerate(versions): - result.append((''.join(('py', version)), 'none', 'any')) - if i == 0: - result.append((''.join(('py', version[0])), 'none', 'any')) - - return set(result) - - -COMPATIBLE_TAGS = compatible_tags() - -del compatible_tags - - -def is_compatible(wheel, tags=None): - if not isinstance(wheel, Wheel): - wheel = Wheel(wheel) # assume it's a filename - result = False - if tags is None: - tags = COMPATIBLE_TAGS - for ver, abi, arch in tags: - if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch: - result = True - break - return result diff --git a/spaces/tomofi/MMOCR/docs/en/datasets/ner.md b/spaces/tomofi/MMOCR/docs/en/datasets/ner.md deleted file mode 100644 index efda24e8061896f4ba0d1dca06e6157ce5a52fa9..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/docs/en/datasets/ner.md +++ /dev/null @@ -1,22 +0,0 @@ -# Named Entity Recognition - -## Overview - -The structure of the named entity recognition dataset directory is organized as follows. 
- -```text -└── cluener2020 - ├── cluener_predict.json - ├── dev.json - ├── README.md - ├── test.json - ├── train.json - └── vocab.txt -``` - -## Preparation Steps - -### CLUENER2020 - -- Download and extract [cluener_public.zip](https://storage.googleapis.com/cluebenchmark/tasks/cluener_public.zip) to `cluener2020/` -- Download [vocab.txt](https://download.openmmlab.com/mmocr/data/cluener_public/vocab.txt) and move `vocab.txt` to `cluener2020/` diff --git a/spaces/tonne/pycaret/pages/.ipynb_checkpoints/regression-checkpoint.py b/spaces/tonne/pycaret/pages/.ipynb_checkpoints/regression-checkpoint.py deleted file mode 100644 index 3d43560afb6685827431eb4728656e3627908eab..0000000000000000000000000000000000000000 --- a/spaces/tonne/pycaret/pages/.ipynb_checkpoints/regression-checkpoint.py +++ /dev/null @@ -1,27 +0,0 @@ -import streamlit as st -from pycaret.regression import * -from pathlib import Path - -path_root = Path(Path.cwd()) - -from pycaret.datasets import get_data -data = get_data('insurance') -s = setup(data, target = 'charges') -best = compare_models() -evaluate_model(best) - -with st.sidebar: - st.sidebar.markdown("# Regression Feature ❄️") - evaluation_option = st.selectbox("evaluation", ["residuals", "error", "cooks", "rfe", "learning", "vc", "manifold", "feature", "feature_all", "residuals_interactive", "parameter", "tree"]) - -st.markdown("# Regression❄️") -st.markdown("## Evaluation") -image_options = {"error": "Prediction Error.png", - "residuals": "Residuals.png"} -try: - st.markdown(f"## {evaluation_option}") - plot_model(best, plot = evaluation_option, save = 'images') - st.image(str(path_root.joinpath(f"images/{image_options[evaluation_option]}"))) -except e: - st.text(e) -print(evaluation_option) diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/scripts/tests/test_watermark.py b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/scripts/tests/test_watermark.py deleted file mode 100644 index f93f8a6e70763c0e284157bc8225827520b2f5ef..0000000000000000000000000000000000000000 --- a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/scripts/tests/test_watermark.py +++ /dev/null @@ -1,18 +0,0 @@ -import cv2 -import fire -from imwatermark import WatermarkDecoder - - -def testit(img_path): - bgr = cv2.imread(img_path) - decoder = WatermarkDecoder('bytes', 136) - watermark = decoder.decode(bgr, 'dwtDct') - try: - dec = watermark.decode('utf-8') - except: - dec = "null" - print(dec) - - -if __name__ == "__main__": - fire.Fire(testit) \ No newline at end of file diff --git a/spaces/touchscale/DeepDanbooru_string/app.py b/spaces/touchscale/DeepDanbooru_string/app.py deleted file mode 100644 index 49019837c9207cc68cb37be0342f3bc44fd0decb..0000000000000000000000000000000000000000 --- a/spaces/touchscale/DeepDanbooru_string/app.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import argparse -import functools -import os -import html -import pathlib -import tarfile - -import deepdanbooru as dd -import gradio as gr -import huggingface_hub -import numpy as np -import PIL.Image -import tensorflow as tf -import piexif -import piexif.helper - -TITLE = 'DeepDanbooru String' - -TOKEN = os.environ['TOKEN'] -MODEL_REPO = 'CikeyQI/DeepDanbooru_string' -MODEL_FILENAME = 'model-resnet_custom_v3.h5' -LABEL_FILENAME = 'tags.txt' - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser() - parser.add_argument('--score-slider-step', type=float, default=0.05) - parser.add_argument('--score-threshold', 
type=float, default=0.5) - parser.add_argument('--theme', type=str, default='dark-grass') - parser.add_argument('--live', action='store_true') - parser.add_argument('--share', action='store_true') - parser.add_argument('--port', type=int) - parser.add_argument('--disable-queue', - dest='enable_queue', - action='store_false') - parser.add_argument('--allow-flagging', type=str, default='never') - return parser.parse_args() - - -def load_sample_image_paths() -> list[pathlib.Path]: - image_dir = pathlib.Path('images') - if not image_dir.exists(): - dataset_repo = 'hysts/sample-images-TADNE' - path = huggingface_hub.hf_hub_download(dataset_repo, - 'images.tar.gz', - repo_type='dataset', - use_auth_token=TOKEN) - with tarfile.open(path) as f: - f.extractall() - return sorted(image_dir.glob('*')) - - -def load_model() -> tf.keras.Model: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - MODEL_FILENAME, - use_auth_token=TOKEN) - model = tf.keras.models.load_model(path) - return model - - -def load_labels() -> list[str]: - path = huggingface_hub.hf_hub_download(MODEL_REPO, - LABEL_FILENAME, - use_auth_token=TOKEN) - with open(path) as f: - labels = [line.strip() for line in f.readlines()] - return labels - -def plaintext_to_html(text): - text = "

            " + "
            \n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "

            " - return text - -def predict(image: PIL.Image.Image, score_threshold: float, - model: tf.keras.Model, labels: list[str]) -> dict[str, float]: - rawimage = image - _, height, width, _ = model.input_shape - image = np.asarray(image) - image = tf.image.resize(image, - size=(height, width), - method=tf.image.ResizeMethod.AREA, - preserve_aspect_ratio=True) - image = image.numpy() - image = dd.image.transform_and_pad_image(image, width, height) - image = image / 255. - probs = model.predict(image[None, ...])[0] - probs = probs.astype(float) - res = dict() - for prob, label in zip(probs.tolist(), labels): - if prob < score_threshold: - continue - res[label] = prob - b = dict(sorted(res.items(),key=lambda item:item[1], reverse=True)) - a = ', '.join(list(b.keys())).replace('_',' ').replace('(','\(').replace(')','\)') - c = ', '.join(list(b.keys())) - - items = rawimage.info - geninfo = '' - - if "exif" in rawimage.info: - exif = piexif.load(rawimage.info["exif"]) - exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'') - try: - exif_comment = piexif.helper.UserComment.load(exif_comment) - except ValueError: - exif_comment = exif_comment.decode('utf8', errors="ignore") - - items['exif comment'] = exif_comment - geninfo = exif_comment - - for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif', - 'loop', 'background', 'timestamp', 'duration']: - items.pop(field, None) - - geninfo = items.get('parameters', geninfo) - - info = f""" -

            PNG Info

            -""" - for key, text in items.items(): - info += f""" -
            -

            {plaintext_to_html(str(key))}

            -

            {plaintext_to_html(str(text))}

            -
            -""".strip()+"\n" - - if len(info) == 0: - message = "Nothing found in the image." - info = f"

            {message}

            " - - return (a,c,res,info) - - -def main(): - args = parse_args() - model = load_model() - labels = load_labels() - - func = functools.partial(predict, model=model, labels=labels) - func = functools.update_wrapper(func, predict) - - gr.Interface( - func, - [ - gr.inputs.Image(type='pil', label='Input'), - gr.inputs.Slider(0, - 1, - step=args.score_slider_step, - default=args.score_threshold, - label='Score Threshold'), - ], - [ - gr.outputs.Textbox(label='Output (string)'), - gr.outputs.Textbox(label='Output (raw string)'), - gr.outputs.Label(label='Output (label)'), - gr.outputs.HTML() - ], - examples=[ - ['miku.jpg',0.5], - ['miku2.jpg',0.5] - ], - title=TITLE, - description=''' -Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer. - -Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru) - -PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - ''', - theme=args.theme, - allow_flagging=args.allow_flagging, - live=args.live, - ).launch( - enable_queue=args.enable_queue, - server_port=args.port, - share=args.share, - ) - - -if __name__ == '__main__': - main() diff --git a/spaces/tsinghua-ee/SALMONN-7B-gradio/README.md b/spaces/tsinghua-ee/SALMONN-7B-gradio/README.md deleted file mode 100644 index 48fd7f69b8f28d8c9e121cb69e521eef62efb321..0000000000000000000000000000000000000000 --- a/spaces/tsinghua-ee/SALMONN-7B-gradio/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SALMONN 7B Gradio -emoji: ⚡ -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 4.1.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/rev/rev_blocks.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/rev/rev_blocks.py deleted file mode 100644 index 0f237b60e1fa8deb80ad84608939eddb304a9b31..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/rev/rev_blocks.py +++ /dev/null @@ -1,136 +0,0 @@ -''' -这里提供了俩个示例用来演示如何写RevBlock -模块中 forward 和 invert 都必须要有定义,并且要有俩个输入和输出 -invert 是 forward 的逆函数 - -输出时交换 y 和 x2 的位置是为了能让 y 也通过 F 进行处理,交换不是必须的,但建议交换位置 - -可逆原理 -forward 时 -输入 x1, x2 -y = x1 + F(x2) -输出 y, x2 - -invert 时 -输入 y,x2 -x1 = y - F(x2) -输出 x1, x2 - -示例模块 -SimpleRevBlock 最简单的可逆模块 -SimpleRevBlock2 加入了下采样的可逆模块,注意,例如下采样倍数为2,则输出通道数至少输入通道的4倍 - -''' - -import torch -import torch.nn as nn -import torch.nn.functional as F -from .rev_layers import RevChannelPad2D, RevPS_Inverse, RevIdenity - - -class SimpleRevBlock(nn.Module): - def __init__(self): - super().__init__() - self.func = nn.Sequential( - nn.Conv2d(20, 20, 3, 1, 1), - nn.BatchNorm2d(20), - nn.LeakyReLU(0.2), - nn.Conv2d(20, 20, 3, 1, 1), - nn.BatchNorm2d(20), - nn.LeakyReLU(0.2) - ) - - def forward(self, x1, x2): - y = x1 + self.func(x2) - return x2, y - - def invert(self, y1, y2): - x2, y = y1, y2 - x1 = y - self.func(x2) - return x1, x2 - - -class SimpleRevBlock2(nn.Module): - def __init__(self, in_ch, out_ch, stride, act): - super().__init__() - assert stride in {1, 2} - assert (out_ch >= in_ch) if stride == 1 else (out_ch >= in_ch * stride**2 and out_ch % stride**2 == 0) - self.stride = stride - - if stride >= 2: - self.ds = RevPS_Inverse(stride) - self.pad = 
RevChannelPad2D(out_ch // stride ** 2 - in_ch) - elif out_ch > in_ch: - self.ds = RevIdenity() - self.pad = RevChannelPad2D(out_ch - in_ch) - else: - self.ds = RevIdenity() - self.pad = RevIdenity() - - self.func = nn.Sequential( - nn.Conv2d(out_ch, out_ch//2, 3, 1, 1), - nn.BatchNorm2d(out_ch//2), - act, - nn.Conv2d(out_ch//2, out_ch, 3, 1, 1), - nn.BatchNorm2d(out_ch), - act - ) - - def forward(self, x1, x2): - x1 = self.pad(x1) - x1 = self.ds(x1) - x2 = self.pad(x2) - x2 = self.ds(x2) - y = x1 + self.func(x2) - return x2, y - - def invert(self, y1, y2): - x2, y = y1, y2 - x1 = y - self.func(x2) - x1 = self.ds.invert(x1) - x1 = self.pad.invert(x1) - x2 = self.ds.invert(x2) - x2 = self.pad.invert(x2) - return x1, x2 - - -class RevSequential(nn.ModuleList): - ''' - 功能大部分与ModuleList重叠 - ''' - def __init__(self, modules=None): - super().__init__(modules) - - def append(self, module): - assert hasattr(module, 'invert') and callable(module.invert) - super().append(module) - - def extend(self, modules): - for m in modules: - self.append(m) - - def forward(self, x1, x2): - y1, y2 = x1, x2 - for m in self: - y1, y2 = m(y1, y2) - return y1, y2 - - def invert(self, y1, y2): - x1, x2 = y1, y2 - for m in list(self)[::-1]: - x1, x2 = m.invert(x1, x2) - return x1, x2 - - -class RevGroupBlock(RevSequential): - ''' - 当前只支持输入通道等于输出通道,并且不允许下采样 - ''' - def __init__(self, in_ch, out_ch, stride, act, block_type, blocks, **kwargs): - assert in_ch == out_ch - assert stride == 1 - mods = [] - for _ in range(blocks): - mods.append(block_type(in_ch=in_ch, out_ch=out_ch, stride=1, act=act, **kwargs)) - # self.extend(mods) - super().__init__(mods) diff --git a/spaces/ucalyptus/PTI/torch_utils/ops/bias_act.py b/spaces/ucalyptus/PTI/torch_utils/ops/bias_act.py deleted file mode 100644 index 4bcb409a89ccf6c6f6ecfca5962683df2d280b1f..0000000000000000000000000000000000000000 --- a/spaces/ucalyptus/PTI/torch_utils/ops/bias_act.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom PyTorch ops for efficient bias and activation.""" - -import os -import warnings -import numpy as np -import torch -import dnnlib -import traceback - -from .. import custom_ops -from .. 
import misc - -#---------------------------------------------------------------------------- - -activation_funcs = { - 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), - 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), - 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), - 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), - 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), - 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), - 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), - 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), - 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), -} - -#---------------------------------------------------------------------------- - -_inited = False -_plugin = None -_null_tensor = torch.empty([0]) - -def _init(): - global _inited, _plugin - if not _inited: - _inited = True - sources = ['bias_act.cpp', 'bias_act.cu'] - sources = [os.path.join(os.path.dirname(__file__), s) for s in sources] - try: - _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math']) - except: - warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc()) - return _plugin is not None - -#---------------------------------------------------------------------------- - -def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'): - r"""Fused bias and activation function. - - Adds bias `b` to activation tensor `x`, evaluates activation function `act`, - and scales the result by `gain`. Each of the steps is optional. In most cases, - the fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports first and second order gradients, - but not third order gradients. - - Args: - x: Input activation tensor. Can be of any shape. - b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type - as `x`. The shape must be known, and it must match the dimension of `x` - corresponding to `dim`. - dim: The dimension in `x` corresponding to the elements of `b`. - The value of `dim` is ignored if `b` is not specified. - act: Name of the activation function to evaluate, or `"linear"` to disable. - Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc. - See `activation_funcs` for a full list. `None` is not allowed. - alpha: Shape parameter for the activation function, or `None` to use the default. - gain: Scaling factor for the output tensor, or `None` to use default. - See `activation_funcs` for the default scaling of each activation function. - If unsure, consider specifying 1. 
- clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable - the clamping (default). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the same shape and datatype as `x`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b) - return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None): - """Slow reference implementation of `bias_act()` using standard TensorFlow ops. - """ - assert isinstance(x, torch.Tensor) - assert clamp is None or clamp >= 0 - spec = activation_funcs[act] - alpha = float(alpha if alpha is not None else spec.def_alpha) - gain = float(gain if gain is not None else spec.def_gain) - clamp = float(clamp if clamp is not None else -1) - - # Add bias. - if b is not None: - assert isinstance(b, torch.Tensor) and b.ndim == 1 - assert 0 <= dim < x.ndim - assert b.shape[0] == x.shape[dim] - x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)]) - - # Evaluate activation function. - alpha = float(alpha) - x = spec.func(x, alpha=alpha) - - # Scale by gain. - gain = float(gain) - if gain != 1: - x = x * gain - - # Clamp. - if clamp >= 0: - x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type - return x - -#---------------------------------------------------------------------------- - -_bias_act_cuda_cache = dict() - -def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None): - """Fast CUDA implementation of `bias_act()` using custom ops. - """ - # Parse arguments. - assert clamp is None or clamp >= 0 - spec = activation_funcs[act] - alpha = float(alpha if alpha is not None else spec.def_alpha) - gain = float(gain if gain is not None else spec.def_gain) - clamp = float(clamp if clamp is not None else -1) - - # Lookup from cache. - key = (dim, act, alpha, gain, clamp) - if key in _bias_act_cuda_cache: - return _bias_act_cuda_cache[key] - - # Forward op. - class BiasActCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, b): # pylint: disable=arguments-differ - ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format - x = x.contiguous(memory_format=ctx.memory_format) - b = b.contiguous() if b is not None else _null_tensor - y = x - if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor: - y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp) - ctx.save_for_backward( - x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, - b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, - y if 'y' in spec.ref else _null_tensor) - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - dy = dy.contiguous(memory_format=ctx.memory_format) - x, b, y = ctx.saved_tensors - dx = None - db = None - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - dx = dy - if act != 'linear' or gain != 1 or clamp >= 0: - dx = BiasActCudaGrad.apply(dy, x, b, y) - - if ctx.needs_input_grad[1]: - db = dx.sum([i for i in range(dx.ndim) if i != dim]) - - return dx, db - - # Backward op. 
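# A brief, hedged usage sketch for the fused op defined above: bias_act()
# performs "add bias -> activation -> gain -> clamp" in one call, taking the
# CUDA path only when the custom plugin builds and otherwise falling back to
# _bias_act_ref(). Tensor shapes and the clamp value below are illustrative
# assumptions:
#
#   x = torch.randn([8, 512, 4, 4], device='cuda')          # NCHW activations
#   b = torch.zeros([512], device='cuda')                    # per-channel bias
#   y = bias_act(x, b, dim=1, act='lrelu', gain=np.sqrt(2), clamp=256)
#   y_ref = bias_act(x, b, dim=1, act='lrelu', impl='ref')   # reference path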
- class BiasActCudaGrad(torch.autograd.Function): - @staticmethod - def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ - ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format - dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp) - ctx.save_for_backward( - dy if spec.has_2nd_grad else _null_tensor, - x, b, y) - return dx - - @staticmethod - def backward(ctx, d_dx): # pylint: disable=arguments-differ - d_dx = d_dx.contiguous(memory_format=ctx.memory_format) - dy, x, b, y = ctx.saved_tensors - d_dy = None - d_x = None - d_b = None - d_y = None - - if ctx.needs_input_grad[0]: - d_dy = BiasActCudaGrad.apply(d_dx, x, b, y) - - if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]): - d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp) - - if spec.has_2nd_grad and ctx.needs_input_grad[2]: - d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim]) - - return d_dy, d_x, d_b, d_y - - # Add to cache. - _bias_act_cuda_cache[key] = BiasActCuda - return BiasActCuda - -#---------------------------------------------------------------------------- diff --git a/spaces/user238921933/stable-diffusion-webui/modules/ui_postprocessing.py b/spaces/user238921933/stable-diffusion-webui/modules/ui_postprocessing.py deleted file mode 100644 index 7789347028ecb309607038d0bc79eff934f45711..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/ui_postprocessing.py +++ /dev/null @@ -1,57 +0,0 @@ -import gradio as gr -from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue -import modules.generation_parameters_copypaste as parameters_copypaste - - -def create_ui(): - tab_index = gr.State(value=0) - - with gr.Row().style(equal_height=False, variant='compact'): - with gr.Column(variant='compact'): - with gr.Tabs(elem_id="mode_extras"): - with gr.TabItem('Single Image', elem_id="extras_single_tab") as tab_single: - extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") - - with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab") as tab_batch: - image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") - - with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab") as tab_batch_dir: - extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") - extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir") - show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") - - submit = gr.Button('Generate', elem_id="extras_generate", variant='primary') - - script_inputs = scripts.scripts_postproc.setup_ui() - - with gr.Column(): - result_images, html_info_x, html_info, html_log = ui_common.create_output_panel("extras", shared.opts.outdir_extras_samples) - - tab_single.select(fn=lambda: 0, inputs=[], outputs=[tab_index]) - tab_batch.select(fn=lambda: 1, inputs=[], outputs=[tab_index]) - tab_batch_dir.select(fn=lambda: 2, inputs=[], outputs=[tab_index]) - - submit.click( - 
fn=call_queue.wrap_gradio_gpu_call(postprocessing.run_postprocessing, extra_outputs=[None, '']), - inputs=[ - tab_index, - extras_image, - image_batch, - extras_batch_input_dir, - extras_batch_output_dir, - show_extras_results, - *script_inputs - ], - outputs=[ - result_images, - html_info_x, - html_info, - ] - ) - - parameters_copypaste.add_paste_fields("extras", extras_image, None) - - extras_image.change( - fn=scripts.scripts_postproc.image_changed, - inputs=[], outputs=[] - ) diff --git a/spaces/vilsonrodrigues/youtube-retrieval-qa/qa/split.py b/spaces/vilsonrodrigues/youtube-retrieval-qa/qa/split.py deleted file mode 100644 index 4d56ef79ea12de5ebe84d47c357b3d7f62f25f98..0000000000000000000000000000000000000000 --- a/spaces/vilsonrodrigues/youtube-retrieval-qa/qa/split.py +++ /dev/null @@ -1,7 +0,0 @@ -from typing import List - -def split_document(data: List, chunk_size: int = 3000) -> List: - from langchain.text_splitter import RecursiveCharacterTextSplitter - text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=400) - docs = text_splitter.split_documents(data) - return docs \ No newline at end of file diff --git a/spaces/wanghaha13/ChuanhuChatGPT/presets.py b/spaces/wanghaha13/ChuanhuChatGPT/presets.py deleted file mode 100644 index 935b9b8d9250838ef06af8e3fbe0979162bfa394..0000000000000000000000000000000000000000 --- a/spaces/wanghaha13/ChuanhuChatGPT/presets.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding:utf-8 -*- - -# ChatGPT 设置 -initial_prompt = "You are a helpful assistant." -API_URL = "https://api.openai.com/v1/chat/completions" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -# 错误信息 -standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀 -error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。" # 获取对话时发生错误 -connection_timeout_prompt = "连接超时,无法获取对话。" # 连接超时 -read_timeout_prompt = "读取超时,无法获取对话。" # 读取超时 -proxy_error_prompt = "代理错误,无法获取对话。" # 代理错误 -ssl_error_prompt = "SSL错误,无法获取对话。" # SSL 错误 -no_apikey_msg = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位 - -max_token_streaming = 3500 # 流式对话时的最大 token 数 -timeout_streaming = 30 # 流式对话时的超时时间 -max_token_all = 3500 # 非流式对话时的最大 token 数 -timeout_all = 200 # 非流式对话时的超时时间 -enable_streaming_option = True # 是否启用选择选择是否实时显示回答的勾选框 -HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True - -SIM_K = 5 -INDEX_QUERY_TEMPRATURE = 1.0 - -title = """

            川虎ChatGPT 🚀

            """ -description = """\ -
            - -由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发 - -访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本 - -此App使用 `gpt-3.5-turbo` 大语言模型 -
            -""" - -summarize_prompt = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt - -MODELS = [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301", - "gpt-4", - "gpt-4-0314", - "gpt-4-32k", - "gpt-4-32k-0314", -] # 可选的模型 - - -WEBSEARCH_PTOMPT_TEMPLATE = """\ -Web search results: - -{web_results} -Current date: {current_date} - -Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. -Query: {query} -Reply in 中文""" - -PROMPT_TEMPLATE = """\ -Context information is below. ---------------------- -{context_str} ---------------------- -Current date: {current_date}. -Using the provided context information, write a comprehensive reply to the given query. -Make sure to cite results using [number] notation after the reference. -If the provided context information refer to multiple subjects with the same name, write separate answers for each subject. -Use prior knowledge only if the given context didn't provide enough information. -Answer the question: {query_str} -Reply in 中文 -""" - -REFINE_TEMPLATE = """\ -The original question is as follows: {query_str} -We have provided an existing answer: {existing_answer} -We have the opportunity to refine the existing answer -(only if needed) with some more context below. ------------- -{context_msg} ------------- -Given the new context, refine the original answer to better -Answer in the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch. -If the context isn't useful, return the original answer. -""" diff --git a/spaces/wasay/FaceRecogTUKL/README.md b/spaces/wasay/FaceRecogTUKL/README.md deleted file mode 100644 index c7a516dd5865d1b4442a154ce3350787ee3273c7..0000000000000000000000000000000000000000 --- a/spaces/wasay/FaceRecogTUKL/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: FaceRecogTUKL -emoji: 👁 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/learn/google_search.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/learn/google_search.py deleted file mode 100644 index ef099fe948c42b6ccfd8cbacdda0a7efa255de59..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/learn/google_search.py +++ /dev/null @@ -1,12 +0,0 @@ -from metagpt.tools.search_engine import SearchEngine - - -async def google_search(query: str, max_results: int = 6, **kwargs): - """Perform a web search and retrieve search results. - - :param query: The search query. - :param max_results: The number of search results to retrieve - :return: The web search results in markdown format. - """ - resluts = await SearchEngine().run(query, max_results=max_results, as_string=False) - return "\n".join(f"{i}. 
[{j['title']}]({j['link']}): {j['snippet']}" for i, j in enumerate(resluts, 1)) diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/tools/ut_writer.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/tools/ut_writer.py deleted file mode 100644 index 2f4e1ec217a3077d480a917627c835ac6a31a420..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/tools/ut_writer.py +++ /dev/null @@ -1,290 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import json -from pathlib import Path - -from metagpt.provider.openai_api import OpenAIGPTAPI as GPTAPI - -ICL_SAMPLE = '''接口定义: -```text -接口名称:元素打标签 -接口路径:/projects/{project_key}/node-tags -Method:POST - -请求参数: -路径参数: -project_key - -Body参数: -名称 类型 是否必须 默认值 备注 -nodes array 是 节点 - node_key string 否 节点key - tags array 否 节点原标签列表 - node_type string 否 节点类型 DATASET / RECIPE -operations array 是 - tags array 否 操作标签列表 - mode string 否 操作类型 ADD / DELETE - -返回数据: -名称 类型 是否必须 默认值 备注 -code integer 是 状态码 -msg string 是 提示信息 -data object 是 返回数据 -list array 否 node列表 true / false -node_type string 否 节点类型 DATASET / RECIPE -node_key string 否 节点key -``` - -单元测试: -```python -@pytest.mark.parametrize( -"project_key, nodes, operations, expected_msg", -[ -("project_key", [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "success"), -("project_key", [{"node_key": "dataset_002", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["tag1"], "mode": "DELETE"}], "success"), -("", [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "缺少必要的参数 project_key"), -(123, [{"node_key": "dataset_001", "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "参数类型不正确"), -("project_key", [{"node_key": "a"*201, "tags": ["tag1", "tag2"], "node_type": "DATASET"}], [{"tags": ["new_tag1"], "mode": "ADD"}], "请求参数超出字段边界") -] -) -def test_node_tags(project_key, nodes, operations, expected_msg): - pass -``` -以上是一个 接口定义 与 单元测试 样例。 -接下来,请你扮演一个Google 20年经验的专家测试经理,在我给出 接口定义 后,回复我单元测试。有几个要求 -1. 只输出一个 `@pytest.mark.parametrize` 与对应的test_<接口名>函数(内部pass,不实现) --- 函数参数中包含expected_msg,用于结果校验 -2. 生成的测试用例使用较短的文本或数字,并且尽量紧凑 -3. 
如果需要注释,使用中文 - -如果你明白了,请等待我给出接口定义,并只回答"明白",以节省token -''' - -ACT_PROMPT_PREFIX = '''参考测试类型:如缺少请求参数,字段边界校验,字段类型不正确 -请在一个 `@pytest.mark.parametrize` 作用域内输出10个测试用例 -```text -''' - -YFT_PROMPT_PREFIX = '''参考测试类型:如SQL注入,跨站点脚本(XSS),非法访问和越权访问,认证和授权,参数验证,异常处理,文件上传和下载 -请在一个 `@pytest.mark.parametrize` 作用域内输出10个测试用例 -```text -''' - -OCR_API_DOC = '''```text -接口名称:OCR识别 -接口路径:/api/v1/contract/treaty/task/ocr -Method:POST - -请求参数: -路径参数: - -Body参数: -名称 类型 是否必须 默认值 备注 -file_id string 是 -box array 是 -contract_id number 是 合同id -start_time string 否 yyyy-mm-dd -end_time string 否 yyyy-mm-dd -extract_type number 否 识别类型 1-导入中 2-导入后 默认1 - -返回数据: -名称 类型 是否必须 默认值 备注 -code integer 是 -message string 是 -data object 是 -``` -''' - - -class UTGenerator: - """UT生成器:通过API文档构造UT""" - - def __init__(self, swagger_file: str, ut_py_path: str, questions_path: str, - chatgpt_method: str = "API", template_prefix=YFT_PROMPT_PREFIX) -> None: - """初始化UT生成器 - - Args: - swagger_file: swagger路径 - ut_py_path: 用例存放路径 - questions_path: 模版存放路径,便于后续排查 - chatgpt_method: API - template_prefix: 使用模版,默认使用YFT_UT_PROMPT - """ - self.swagger_file = swagger_file - self.ut_py_path = ut_py_path - self.questions_path = questions_path - assert chatgpt_method in ["API"], "非法chatgpt_method" - self.chatgpt_method = chatgpt_method - - # ICL: In-Context Learning,这里给出例子,要求GPT模仿例子 - self.icl_sample = ICL_SAMPLE - self.template_prefix = template_prefix - - def get_swagger_json(self) -> dict: - """从本地文件加载Swagger JSON""" - with open(self.swagger_file, "r", encoding="utf-8") as file: - swagger_json = json.load(file) - return swagger_json - - def __para_to_str(self, prop, required, name=""): - name = name or prop["name"] - ptype = prop["type"] - title = prop.get("title", "") - desc = prop.get("description", "") - return f'{name}\t{ptype}\t{"是" if required else "否"}\t{title}\t{desc}' - - def _para_to_str(self, prop): - required = prop.get("required", False) - return self.__para_to_str(prop, required) - - def para_to_str(self, name, prop, prop_object_required): - required = name in prop_object_required - return self.__para_to_str(prop, required, name) - - def build_object_properties(self, node, prop_object_required, level: int = 0) -> str: - """递归输出object和array[object]类型的子属性 - - Args: - node (_type_): 子项的值 - prop_object_required (_type_): 是否必填项 - level: 当前递归深度 - """ - - doc = "" - - def dive_into_object(node): - """如果是object类型,递归输出子属性""" - if node.get("type") == "object": - sub_properties = node.get("properties", {}) - return self.build_object_properties(sub_properties, prop_object_required, level=level + 1) - return "" - - if node.get("in", "") in ["query", "header", "formData"]: - doc += f'{" " * level}{self._para_to_str(node)}\n' - doc += dive_into_object(node) - return doc - - for name, prop in node.items(): - doc += f'{" " * level}{self.para_to_str(name, prop, prop_object_required)}\n' - doc += dive_into_object(prop) - if prop["type"] == "array": - items = prop.get("items", {}) - doc += dive_into_object(items) - return doc - - def get_tags_mapping(self) -> dict: - """处理tag与path - - Returns: - Dict: tag: path对应关系 - """ - swagger_data = self.get_swagger_json() - paths = swagger_data["paths"] - tags = {} - - for path, path_obj in paths.items(): - for method, method_obj in path_obj.items(): - for tag in method_obj["tags"]: - if tag not in tags: - tags[tag] = {} - if path not in tags[tag]: - tags[tag][path] = {} - tags[tag][path][method] = method_obj - - return tags - - def generate_ut(self, include_tags) -> bool: - """生成用例文件""" - tags = self.get_tags_mapping() - 
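# A hypothetical driver sketch for this class (the file paths and tag name are
# assumptions): construct the generator from a Swagger dump, then emit one
# pytest.mark.parametrize skeleton per documented endpoint via the GPT API.
#
#   gen = UTGenerator(swagger_file='data/swagger.json',
#                     ut_py_path='data/ut_files',
#                     questions_path='data/question_files',
#                     template_prefix=YFT_PROMPT_PREFIX)
#   gen.generate_ut(include_tags=['contract'])   # pass None to cover all tags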
for tag, paths in tags.items(): - if include_tags is None or tag in include_tags: - self._generate_ut(tag, paths) - return True - - def build_api_doc(self, node: dict, path: str, method: str) -> str: - summary = node["summary"] - - doc = f"接口名称:{summary}\n接口路径:{path}\nMethod:{method.upper()}\n" - doc += "\n请求参数:\n" - if "parameters" in node: - parameters = node["parameters"] - doc += "路径参数:\n" - - # param["in"]: path / formData / body / query / header - for param in parameters: - if param["in"] == "path": - doc += f'{param["name"]} \n' - - doc += "\nBody参数:\n" - doc += "名称\t类型\t是否必须\t默认值\t备注\n" - for param in parameters: - if param["in"] == "body": - schema = param.get("schema", {}) - prop_properties = schema.get("properties", {}) - prop_required = schema.get("required", []) - doc += self.build_object_properties(prop_properties, prop_required) - else: - doc += self.build_object_properties(param, []) - - # 输出返回数据信息 - doc += "\n返回数据:\n" - doc += "名称\t类型\t是否必须\t默认值\t备注\n" - responses = node["responses"] - response = responses.get("200", {}) - schema = response.get("schema", {}) - properties = schema.get("properties", {}) - required = schema.get("required", {}) - - doc += self.build_object_properties(properties, required) - doc += "\n" - doc += "```" - - return doc - - def _store(self, data, base, folder, fname): - file_path = self.get_file_path(Path(base) / folder, fname) - with open(file_path, "w", encoding="utf-8") as file: - file.write(data) - - def ask_gpt_and_save(self, question: str, tag: str, fname: str): - """生成问题,并且存储问题与答案""" - messages = [self.icl_sample, question] - result = self.gpt_msgs_to_code(messages=messages) - - self._store(question, self.questions_path, tag, f"{fname}.txt") - self._store(result, self.ut_py_path, tag, f"{fname}.py") - - def _generate_ut(self, tag, paths): - """处理数据路径下的结构 - - Args: - tag (_type_): 模块名称 - paths (_type_): 路径Object - """ - for path, path_obj in paths.items(): - for method, node in path_obj.items(): - summary = node["summary"] - question = self.template_prefix - question += self.build_api_doc(node, path, method) - self.ask_gpt_and_save(question, tag, summary) - - def gpt_msgs_to_code(self, messages: list) -> str: - """根据不同调用方式选择""" - result = '' - if self.chatgpt_method == "API": - result = GPTAPI().ask_code(msgs=messages) - - return result - - def get_file_path(self, base: Path, fname: str): - """保存不同的文件路径 - - Args: - base (str): 路径 - fname (str): 文件名称 - """ - path = Path(base) - path.mkdir(parents=True, exist_ok=True) - file_path = path / fname - return str(file_path) diff --git a/spaces/wilson1/bingo/src/components/ui/textarea.tsx b/spaces/wilson1/bingo/src/components/ui/textarea.tsx deleted file mode 100644 index e25af722c7a5dc1121a9ab58d6716952f9f76081..0000000000000000000000000000000000000000 --- a/spaces/wilson1/bingo/src/components/ui/textarea.tsx +++ /dev/null @@ -1,24 +0,0 @@ -import * as React from 'react' - -import { cn } from '@/lib/utils' - -export interface TextareaProps - extends React.TextareaHTMLAttributes {} - -const Textarea = React.forwardRef( - ({ className, ...props }, ref) => { - return ( -