diff --git a/spaces/101-5/gpt4free/g4f/.v1/gui/streamlit_app.py b/spaces/101-5/gpt4free/g4f/.v1/gui/streamlit_app.py
deleted file mode 100644
index 2dba0a7b672470f20aa163005d42894dd17df7c0..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gui/streamlit_app.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-import sys
-
-sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
-
-import streamlit as st
-from gpt4free import you
-
-
-def get_answer(question: str) -> str:
-    # Set cloudflare clearance cookie and get answer from GPT-4 model
-    try:
-        result = you.Completion.create(prompt=question)
-
-        return result.text
-
-    except Exception as e:
-        # Return error message if an exception occurs
-        return (
-            f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
-        )
-
-
-# Set page configuration and add header
-st.set_page_config(
-    page_title="gpt4freeGUI",
-    initial_sidebar_state="expanded",
-    page_icon="🧠",
-    menu_items={
-        'Get Help': 'https://github.com/xtekky/gpt4free/blob/main/README.md',
-        'Report a bug': "https://github.com/xtekky/gpt4free/issues",
-        'About': "### gptfree GUI",
-    },
-)
-st.header('GPT4free GUI')
-
-# Add text area for user input and button to get answer
-question_text_area = st.text_area('🤖 Ask Any Question :', placeholder='Explain quantum computing in 50 words')
-if st.button('🧠 Think'):
-    answer = get_answer(question_text_area)
-    escaped = answer.encode('utf-8').decode('unicode-escape')
-    # Display answer
-    st.caption("Answer :")
-    st.markdown(escaped)
-
-# Hide Streamlit footer
-hide_streamlit_style = """
-    <style> footer {visibility: hidden;} </style>
-"""
-st.markdown(hide_streamlit_style, unsafe_allow_html=True)
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/((TOP)) Free Windows 11.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/((TOP)) Free Windows 11.md
deleted file mode 100644
index 2a0cd116733fde24763ee3ef9702e0e69965a2c3..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/((TOP)) Free Windows 11.md
+++ /dev/null
@@ -1,41 +0,0 @@
-

How to Get Windows 11 for Free on Your PC

-

Windows 11 is the latest version of Microsoft's operating system that offers a redesigned and refreshed look, new tools, sounds, and apps, and improved performance and security. If you are wondering how to get Windows 11 for free on your PC, you have come to the right place.

-

free windows 11


Download >>>>> https://byltly.com/2uKyib



-

In this article, we will show you how to check if your PC is eligible for a free upgrade to Windows 11, how to download and install Windows 11 for free using Windows Update or Installation Assistant, and how to buy a license for Windows 11 if you don't have one. Let's get started.

-

Check If Your PC Is Eligible for a Free Upgrade to Windows 11

-

Windows 11 is a free upgrade for Windows 10 users who have a compatible PC and a valid license. To check if your PC meets the minimum hardware requirements for Windows 11, you can use the PC Health Check app that you can download from Microsoft's website. The app will scan your PC and tell you if it can run Windows 11 or not.

-

The minimum hardware requirements for Windows 11 are:

- A 1 GHz or faster 64-bit processor with two or more cores
- 4 GB of RAM
- 64 GB of storage or more
- UEFI firmware with Secure Boot capability
- TPM (Trusted Platform Module) version 2.0
- A DirectX 12 compatible graphics card with a WDDM 2.0 driver
- A display larger than 9 inches with HD (720p) resolution
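If you want a quick pre-check before running PC Health Check, a rough Python sketch like the one below compares your hardware against the quantitative minimums. It is illustrative only: it assumes Windows with the system drive at C:\ and the third-party psutil package, and it cannot check TPM, Secure Boot, or GPU support, so the PC Health Check app remains the authoritative test.

```python
# Rough Windows 11 pre-check: cores, RAM, and disk only (assumes psutil is
# installed and the system drive is C:). TPM/Secure Boot/GPU are NOT checked.
import shutil
import psutil

GB = 1024 ** 3
checks = {
    "2+ CPU cores": psutil.cpu_count(logical=False) >= 2,
    "4+ GB RAM": psutil.virtual_memory().total >= 4 * GB,
    "64+ GB storage": shutil.disk_usage("C:\\").total >= 64 * GB,
}
for name, ok in checks.items():
    print(f"{name}: {'OK' if ok else 'FAIL'}")
```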

If your PC meets these requirements, you can proceed to the next step. If not, you may need to upgrade your hardware or buy a new PC that supports Windows 11.

-

Download and Install Windows 11 for Free Using Windows Update or Installation Assistant

-

If your PC is eligible for a free upgrade to Windows 11, you have two options to download and install it: using Windows Update or using Installation Assistant.

-

-

The first option is to use Windows Update, which is the easiest and most recommended way. To do this, follow these steps:

-
  1. Select the Start button, then click on Settings.
  2. Click on Update & Security.
  3. Select Check for updates to see if your PC can run Windows 11, or if the upgrade is ready for your PC.
  4. If the upgrade is available, follow the on-screen instructions to download and install Windows 11.

The second option is to use Installation Assistant, which is a tool that you can download from Microsoft's website. This option is for those who want to upgrade a Windows 10 PC to Windows 11 manually. To do this, follow these steps:

-
  1. Download the Installation Assistant from Microsoft's website and run it on your PC.
  2. Note that you need to be an administrator to run this tool.
  3. Once the tool confirms that your device hardware is compatible, you will be presented with the license terms. Select Accept and Install.
  4. The tool will download and install Windows 11 on your PC. This may take some time depending on your internet speed and PC performance.

Buy a License for Windows 11 If You Don't Have One

-

If you don't have a valid license for Windows 10 or any other previous version of Windows, you will need to buy one to run Windows 11 on your PC. You can buy a license from Microsoft's website or from other retailers. The price may vary depending on the edition (Home or Pro) and the region.

-

Once you

-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Geant 2500hd probleme boot Le guide complet pour le dbloquer.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Geant 2500hd probleme boot Le guide complet pour le dbloquer.md
deleted file mode 100644
index 5a9708326009ca371a6fea22facf8f47fc6d3e9e..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Geant 2500hd probleme boot Le guide complet pour le dbloquer.md
+++ /dev/null
@@ -1,126 +0,0 @@
-

How to Fix Geant 2500hd Probleme Boot

-

If you own a Geant 2500hd satellite receiver, you may have encountered a boot problem that prevents you from using your device normally. This problem can be frustrating and annoying, especially if you don't know how to fix it. In this article, we will explain what is Geant 2500hd and what is the boot problem, what are the possible causes of this problem, and how to solve it with different methods.

-

Geant 2500hd probleme boot


Download File ->>> https://byltly.com/2uKyuS



-

What is Geant 2500hd and what is the boot problem?

-

Geant 2500hd is a satellite receiver that offers many features and channels

-

Geant 2500hd is a popular satellite receiver that allows you to watch hundreds of channels from different satellites. It also supports various functions such as recording, playback, internet access, IPTV, YouTube, weather forecast, and more. It has a sleek design and a user-friendly interface that makes it easy to operate.

-

The boot problem occurs when the device is stuck on the boot screen or shows an error message

-

The boot problem is one of the most common issues that affect Geant 2500hd users. It happens when the device fails to start up properly and gets stuck on the boot screen or shows an error message such as "ERROR 114" or "ERROR 10". When this happens, you cannot access any of the features or channels of your device.

-

What are the possible causes of the boot problem?

-

The boot problem can be caused by a corrupted firmware, a wrong update, a power outage, or a hardware issue

-

There are several factors that can trigger the boot problem on your Geant 2500hd device. Some of them are:

- A corrupted firmware that prevents the device from starting up properly
- A wrong or failed firmware update
- A power outage during startup or during a firmware update
- A hardware issue that requires professional repair

How to solve the boot problem with different methods?

-

Method 1: Use a USB flash drive to install the latest firmware

-

One of the easiest ways to fix the boot problem on your Geant 2500hd device is to use a USB flash drive to install the latest firmware. This can help you restore your device's original settings and fix any software issues. To do this, follow these steps:

-
  1. Download the latest firmware from the official website or a trusted source. Make sure you choose the correct model and version for your device.
  2. Rename the firmware file to Ali_update.bin and copy it to the root of a formatted USB flash drive.
  3. Insert the USB flash drive into your device's USB port.
  4. Turn on your device while holding down the standby button on your remote control.
  5. Wait for your device to detect the firmware file and install it automatically. You will see a progress bar on your screen.
  6. Restart your device and check if the boot problem is solved.
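If you flash receivers often, steps 1 and 2 can be scripted. The sketch below is a minimal illustration in Python: the downloaded firmware filename and the USB drive letter are assumptions, so adjust both for your system.

```python
# Minimal sketch: rename the downloaded firmware to Ali_update.bin and copy it
# to the root of the USB flash drive. Both paths below are assumptions.
import shutil
from pathlib import Path

firmware = Path.home() / "Downloads" / "geant_2500hd_firmware.bin"  # assumed name
usb_root = Path("E:/")  # assumed USB drive letter

shutil.copyfile(firmware, usb_root / "Ali_update.bin")
print("Copied firmware to", usb_root / "Ali_update.bin")
```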

Method 2: Use a RS232 cable and a loader software to flash your device

-

Another way to fix the boot problem on your Geant 2500hd device is to use a RS232 cable and a loader software to flash your device. This can help you overwrite your device's memory with a new firmware file. To do this, follow these steps:

-
  1. Download a dump file and a loader software from a reliable source. Make sure you choose compatible files for your device.
  2. Connect your device to your computer using a RS232 cable.
  3. Open the loader software by running EromUpgrade.exe.
  4. Select Browse and choose virusdz.bin, which is the Géant GN 2500 HD Plus dump file.
  5. Select Include Bootloader.
  6. Select Start.
  7. Wait until the status bar in the EromUpgrade.exe window shows DONE!
  8. Close EromUpgrade.exe and disconnect the RS232 cable.
  9. Turn on your device.
  10. Check whether the boot problem on your Géant GN 2500 HD Plus is solved.
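Before launching the loader, it can help to confirm that the RS232 link works at all. The sketch below only opens and closes the serial port using the pyserial package; it does not implement the flashing protocol itself (EromUpgrade.exe handles that), and the port name and baud rate are assumptions.

```python
# Illustrative serial-link check only (pip install pyserial). This does NOT
# flash the device; EromUpgrade.exe implements the actual flashing protocol.
import serial

try:
    port = serial.Serial("COM3", 115200, timeout=1)  # assumed port and speed
    print("RS232 link opened:", port.name)
    port.close()
except serial.SerialException as exc:
    print("Could not open the RS232 port:", exc)
```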

Method 3: Contact a professional technician or customer service for assistance

-

If none of these methods work for you, you may have a hardware issue that requires professional repair. In this case, you should contact your nearest technician or customer service center for help. They can diagnose your device's problem and provide you with an appropriate solution. To do this, follow these steps:

-
  1. Find a technician or customer service center near you.
  2. Contact them by phone or email and explain your situation.
  3. Provide them with your device's model number, serial number, and warranty information if applicable.
  4. Follow their instructions on how to send or bring your device for repair.
  5. Wait for them to fix your device and return it to you.

Conclusion

-

The boot problem on Geant 2500hd is a common issue that can prevent you from enjoying your satellite receiver. However, you can fix this problem by using one of the methods we have discussed in this article. You can use a USB flash drive to install the latest firmware, use a RS232 cable and a loader software to flash your device, or contact a professional technician or customer service for assistance. We hope this article has helped you solve your boot problem and restore your device's functionality.

-


-

FAQs

-

What is the difference between Geant 2500hd and Geant 2500hd plus?

-

Geant 2500hd and Geant 2500hd plus are two models of satellite receivers from the same brand. They have similar features and functions, but Geant 2500hd plus has some improvements and enhancements over Geant 2500hd. For example, Geant 2500hd plus has more memory, more channels, more IPTV options, and more compatibility with different satellites.

-

How can I update my Geant 2500hd device?

-

You can update your Geant 2500hd device by downloading the latest firmware from the official website or a trusted source and installing it on your device using a USB flash drive. You can also use a RS232 cable and a loader software to flash your device with a new firmware file. You should always check the compatibility and validity of the firmware before installing it on your device.

-

How can I reset my Geant 2500hd device to factory settings?

-

You can reset your Geant 2500hd device to factory settings by using the menu option on your remote control. You can access the menu by pressing the menu button on your remote control and then selecting settings. You can then select factory reset and confirm your choice. This will erase all your data and settings and restore your device to its original state.

-

How can I backup my Geant 2500hd device's data?

-

You can backup your Geant 2500hd device's data by using a USB flash drive or an external hard drive. You can connect your storage device to your receiver's USB port and then use the menu option on your remote control to copy or transfer your data. You can backup your channel list, favorite list, IPTV list, recording files, and other settings.

-

How can I troubleshoot my Geant 2500hd device's problems?

-

You can troubleshoot your Geant 2500hd device's problems by checking the following things:

- -

If you still have problems with your device, you can contact a professional technician or customer service for help.

-

-
-
\ No newline at end of file
diff --git a/spaces/1line/AutoGPT/autogpt/agent/agent.py b/spaces/1line/AutoGPT/autogpt/agent/agent.py
deleted file mode 100644
index ee7885f8844022597321fa6b492430ec34c0d6b9..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/autogpt/agent/agent.py
+++ /dev/null
@@ -1,197 +0,0 @@
-from colorama import Fore, Style
-
-from autogpt.app import execute_command, get_command
-from autogpt.chat import chat_with_ai, create_chat_message
-from autogpt.config import Config
-from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
-from autogpt.json_utils.utilities import validate_json
-from autogpt.logs import logger, print_assistant_thoughts
-from autogpt.speech import say_text
-from autogpt.spinner import Spinner
-from autogpt.utils import clean_input
-
-
-class Agent:
-    """Agent class for interacting with Auto-GPT.
-
-    Attributes:
-        ai_name: The name of the agent.
-        memory: The memory object to use.
-        full_message_history: The full message history.
-        next_action_count: The number of actions to execute.
-        system_prompt: The system prompt is the initial prompt that defines everything
-          the AI needs to know to achieve its task successfully.
-          Currently, the dynamic and customizable information in the system prompt are
-          ai_name, description and goals.
-
-        triggering_prompt: The last sentence the AI will see before answering.
-            For Auto-GPT, this prompt is:
-            Determine which next command to use, and respond using the format specified above:
-            The triggering prompt is not part of the system prompt because between the
-            system prompt and the triggering prompt we have contextual information that
-            can distract the AI and make it forget that its goal is to find the next
-            task to achieve.
-            SYSTEM PROMPT
-            CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
-            TRIGGERING PROMPT
-
-        The triggering prompt reminds the AI about its short term meta task
-        (defining the next task)
-    """
-
-    def __init__(
-        self,
-        ai_name,
-        memory,
-        full_message_history,
-        next_action_count,
-        system_prompt,
-        triggering_prompt,
-    ):
-        self.ai_name = ai_name
-        self.memory = memory
-        self.full_message_history = full_message_history
-        self.next_action_count = next_action_count
-        self.system_prompt = system_prompt
-        self.triggering_prompt = triggering_prompt
-
-    def start_interaction_loop(self):
-        # Interaction Loop
-        cfg = Config()
-        loop_count = 0
-        command_name = None
-        arguments = None
-        user_input = ""
-
-        while True:
-            # Discontinue if continuous limit is reached
-            loop_count += 1
-            if (
-                cfg.continuous_mode
-                and cfg.continuous_limit > 0
-                and loop_count > cfg.continuous_limit
-            ):
-                logger.typewriter_log(
-                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
-                )
-                break
-
-            # Send message to AI, get response
-            with Spinner("Thinking... "):
-                assistant_reply = chat_with_ai(
-                    self.system_prompt,
-                    self.triggering_prompt,
-                    self.full_message_history,
-                    self.memory,
-                    cfg.fast_token_limit,
-                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument
-
-            assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
-
-            # Print Assistant thoughts
-            if assistant_reply_json != {}:
-                validate_json(assistant_reply_json, "llm_response_format_1")
-                # Get command name and arguments
-                try:
-                    print_assistant_thoughts(self.ai_name, assistant_reply_json)
-                    command_name, arguments = get_command(assistant_reply_json)
-                    # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
-                    if cfg.speak_mode:
-                        say_text(f"I want to execute {command_name}")
-                except Exception as e:
-                    logger.error("Error: \n", str(e))
-
-            if not cfg.continuous_mode and self.next_action_count == 0:
-                ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
-                # Get key press: Prompt the user to press enter to continue or escape
-                # to exit
-                logger.typewriter_log(
-                    "NEXT ACTION: ",
-                    Fore.CYAN,
-                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
-                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
-                )
-                print(
-                    "Enter 'y' to authorise command, 'y -N' to run N continuous "
-                    "commands, 'n' to exit program, or enter feedback for "
-                    f"{self.ai_name}...",
-                    flush=True,
-                )
-                while True:
-                    console_input = clean_input(
-                        Fore.MAGENTA + "Input:" + Style.RESET_ALL
-                    )
-                    if console_input.lower().strip() == "y":
-                        user_input = "GENERATE NEXT COMMAND JSON"
-                        break
-                    elif console_input.lower().strip() == "":
-                        print("Invalid input format.")
-                        continue
-                    elif console_input.lower().startswith("y -"):
-                        try:
-                            self.next_action_count = abs(
-                                int(console_input.split(" ")[1])
-                            )
-                            user_input = "GENERATE NEXT COMMAND JSON"
-                        except ValueError:
-                            print(
-                                "Invalid input format. Please enter 'y -n' where n is"
-                                " the number of continuous tasks."
-                            )
-                            continue
-                        break
-                    elif console_input.lower() == "n":
-                        user_input = "EXIT"
-                        break
-                    else:
-                        user_input = console_input
-                        command_name = "human_feedback"
-                        break
-
-                if user_input == "GENERATE NEXT COMMAND JSON":
-                    logger.typewriter_log(
-                        "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
-                        Fore.MAGENTA,
-                        "",
-                    )
-                elif user_input == "EXIT":
-                    print("Exiting...", flush=True)
-                    break
-            else:
-                # Print command
-                logger.typewriter_log(
-                    "NEXT ACTION: ",
-                    Fore.CYAN,
-                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
-                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
-                )
-
-            # Execute command
-            if command_name is not None and command_name.lower().startswith("error"):
-                result = (
-                    f"Command {command_name} threw the following error: {arguments}"
-                )
-            elif command_name == "human_feedback":
-                result = f"Human feedback: {user_input}"
-            else:
-                result = (
-                    f"Command {command_name} returned: "
-                    f"{execute_command(command_name, arguments)}"
-                )
-                if self.next_action_count > 0:
-                    self.next_action_count -= 1
-
-            memory_to_add = (
-                f"Assistant Reply: {assistant_reply} "
-                f"\nResult: {result} "
-                f"\nHuman Feedback: {user_input} "
-            )
-
-            self.memory.add(memory_to_add)
-
-            # Check if there's a result from the command append it to the message
-            # history
-            if result is not None:
-                self.full_message_history.append(create_chat_message("system", result))
-                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
-            else:
-                self.full_message_history.append(
-                    create_chat_message("system", "Unable to execute command")
-                )
-                logger.typewriter_log(
-                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
-                )
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Ultimate The Ultimate Guide to Modding Your Game.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Ultimate The Ultimate Guide to Modding Your Game.md
deleted file mode 100644
index 1d107baef83602b27fa215b37ea77f1a647aa6a3..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bus Simulator Ultimate The Ultimate Guide to Modding Your Game.md
+++ /dev/null
@@ -1,158 +0,0 @@
-
-

How to Use Mod Editor Bus Simulator Ultimate to Create Your Own Bus Driving Experience

-

Do you love bus simulator games? Do you want to customize your own buses, routes, maps, and more? Do you want to share your creations with other bus enthusiasts? If you answered yes to any of these questions, then you need to try mod editor bus simulator ultimate. In this article, we will show you what mod editor bus simulator ultimate is, how to download and install it, how to create and share mods with it, and some FAQs about it. By the end of this article, you will be ready to unleash your creativity and enjoy a whole new level of bus simulation.

-

mod editor bus simulator ultimate


Download File - https://urlin.us/2uSTzm



-

What is Mod Editor Bus Simulator Ultimate?

-

A brief introduction to the game and its features

-

Mod editor bus simulator ultimate is a modding tool for the popular game bus simulator ultimate. Bus simulator ultimate is a realistic and immersive game that lets you drive various buses across different cities and countries. You can choose from different bus models, customize your bus interior and exterior, transport passengers, follow traffic rules, earn money, and more. You can also play online with other players or offline with AI drivers. The game has stunning graphics, realistic physics, dynamic weather, day-night cycle, and many other features that make it one of the best bus simulator games on the market.

-

A brief introduction to the mod editor and its features

-

The mod editor is a free tool that allows you to create your own mods for bus simulator ultimate. Mods are modifications that change or add something to the game, such as new buses, maps, routes, skins, decals, sounds, etc. With the mod editor, you can enhance your bus driving experience by creating your own content and sharing it with the community. The mod editor has many features that make it easy and fun to use, such as:

- -

How to Download and Install Mod Editor Bus Simulator Ultimate?

-

The steps to download and install the mod editor for different platforms

-

The mod editor is available for Windows PC and Mac OS. You can download it from the official website of bus simulator ultimate or from the Epic Games Store. To install it, you need to follow these steps:

-
  1. Download the mod editor installer file from the source of your choice.
  2. Run the installer file and follow the instructions on the screen.
  3. Select the destination folder where you want to install the mod editor.
  4. Wait for the installation to complete and launch the mod editor from the shortcut on your desktop or start menu.

If you have bus simulator ultimate installed on your PC or Mac, the mod editor will automatically detect it and link it to your game folder. If not, you will need to manually locate your game folder and select it in the mod editor settings.

-

The requirements and recommendations for using the mod editor

-

To use the mod editor, you need to have a PC or Mac that meets the minimum system requirements for bus simulator ultimate. These are:

- - - - - - - - - - - - - - - -
| OS | Processor | Memory | Graphics | Storage |
| --- | --- | --- | --- | --- |
| Windows 7/8/10 64-bit or Mac OS X 10.9 or higher | Intel Core i3 or equivalent | 4 GB RAM | NVIDIA GeForce GTX 760 or equivalent | 4 GB available space |
-

However, we recommend that you have a PC or Mac that exceeds the recommended system requirements for bus simulator ultimate. These are:

-

- - - - - - - - - - - - - - - -
| OS | Processor | Memory | Graphics | Storage |
| --- | --- | --- | --- | --- |
| Windows 10 64-bit or Mac OS X 10.15 or higher | Intel Core i5 or equivalent | 8 GB RAM | NVIDIA GeForce GTX 1060 or equivalent | 8 GB available space |
-

This will ensure that you have a smooth and stable performance when using the mod editor and playing the game with your mods. You also need to have a stable internet connection to download and upload mods, and an account on mod.io or Steam to access the modding community.

-

How to Create and Share Mods with Mod Editor Bus Simulator Ultimate?

-

The basic steps to create a mod with the mod editor

-

To create a mod with the mod editor, you need to follow these basic steps:

-
  1. Launch the mod editor and select "Create New Mod" from the main menu.
  2. Enter a name, description, and tags for your mod and click "Create".
  3. Select the type of mod you want to create from the list of templates (e.g. bus, map, route, skin, etc.) and click "Next".
  4. Edit the mod settings and properties according to your preferences (e.g. bus model, map size, route length, skin color, etc.) and click "Next".
  5. Edit the mod content using the editor tools (e.g. add meshes, textures, materials, animations, sounds, etc.) and click "Save".
  6. Preview your mod using the preview mode and make any adjustments if needed (e.g. fix errors, improve quality, add details, etc.) and click "Save".
  7. Publish your mod using the built-in uploader and select the platform of your choice (mod.io or Steam Workshop) and click "Upload".
  8. Wait for your mod to be uploaded and approved by the platform moderators and enjoy your mod in the game!

The types of mods you can create with the mod editor

-

The mod editor allows you to create various types of mods for bus simulator ultimate. Some of the most popular types are:

- -

The tips and tricks to make your mods more realistic and fun

-

To make your mods more realistic and fun for yourself and other players, you can follow these tips and tricks:

- -

The steps to share your mods with the community via mod.io or Steam Workshop

-

To share your mods with the community, you can use either mod.io or Steam Workshop. These are online platforms that allow you to upload, download, rate, comment, and subscribe to mods for various games. To share your mods with them, you need to follow these steps:

-
  1. Create an account on mod.io or Steam if you don't have one already.
  2. Launch the mod editor and select "Publish Mod" from the main menu.
  3. Select the mod you want to publish and click "Next".
  4. Select the platform you want to publish your mod on (mod.io or Steam Workshop) and click "Next".
  5. Enter the details of your mod such as title, description, tags, screenshots, etc. and click "Next".
  6. Review the terms and conditions of the platform and agree to them if you accept them.
  7. Click "Upload" and wait for your mod to be uploaded and approved by the platform moderators.
  8. Once your mod is published, you can view it on the platform website or app and manage it as you wish (e.g. update, delete, etc.).

Conclusion

-

A summary of the main points and benefits of using mod editor bus simulator ultimate

-

In conclusion, mod editor bus simulator ultimate is a great tool that allows you to create your own bus driving experience by creating and sharing mods for bus simulator ultimate. You can customize various aspects of the game such as buses, maps, routes, skins, sounds, etc. You can also enjoy other people's mods and discover new content and features. Mod editor bus simulator ultimate is easy and fun to use and has many features that make it one of the best modding tools for bus simulator games.

-

A call to action to download and try the mod editor

-

If you are interested in using mod editor bus simulator ultimate, you can download it for free from the official website of bus simulator ultimate or from the Epic Games Store. You can also visit the mod.io or Steam Workshop websites or apps to find and download thousands of mods for bus simulator ultimate created by other users. You can also join the modding community and share your feedback, suggestions, questions, and ideas with other modders and players. So what are you waiting for? Download mod editor bus simulator ultimate today and create your own bus driving experience!

-

FAQs

-

Q1: What are the advantages of using mod editor bus simulator ultimate over other bus simulator games?

-

A1: Mod editor bus simulator ultimate has several advantages over other bus simulator games, such as:

- -

Q2: How can I get feedback and support for my mods?

-

A2: You can get feedback and support for your mods by visiting the mod.io or Steam Workshop websites or apps where you published your mods. There you can read the comments, ratings, reviews, and subscriptions of other users who downloaded your mods. You can also reply to them and thank them for their feedback or answer their questions. You can also join the official discord server of bus simulator ultimate where you can chat with other modders and players.

-

Q3: How can I update or delete my mods?

-

A3: You can update or delete your mods by launching the mod editor and selecting "Publish Mod" from the main menu. There you can select the mod you want to update or delete and click "Next". Then you can select the platform where you published your mod (mod.io or Steam Workshop) and click "Next". Then you can either edit the details of your mod and click "Update" or click "Delete" to remove your mod from the platform. You can also update or delete your mods from the mod.io or Steam Workshop websites or apps by logging in to your account and managing your mods.

-

Q4: How can I find and download other people's mods?

-

A4: You can find and download other people's mods by visiting the mod.io or Steam Workshop websites or apps where they published their mods. There you can browse, search, filter, and sort thousands of mods for bus simulator ultimate created by other users. You can also read the descriptions, screenshots, ratings, reviews, and comments of the mods and decide which ones you want to download. To download a mod, you need to click on the "Subscribe" button on the mod page and wait for the mod to be downloaded and installed in your game. You can also unsubscribe from a mod if you don't want it anymore.

-

Q5: How can I learn more about modding for bus simulator ultimate?

-

A5: You can learn more about modding for bus simulator ultimate by reading the documentation that comes with the mod editor. The documentation explains how to use the mod editor in detail and provides examples and tutorials for creating different types of mods. You can also watch some videos on YouTube that show how to use the mod editor and create mods. You can also join the official discord server of bus simulator ultimate where you can ask questions, get tips, and share ideas with other modders and players.

-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA Liberty City Stories APK and Obb Data - Streamlined Missions and Enhanced Graphics.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA Liberty City Stories APK and Obb Data - Streamlined Missions and Enhanced Graphics.md
deleted file mode 100644
index eb9639cd828d9cb9df02a2c680000673ba467f83..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download GTA Liberty City Stories APK and Obb Data - Streamlined Missions and Enhanced Graphics.md
+++ /dev/null
@@ -1,138 +0,0 @@
-

GTA: Liberty City Stories APK and OBB: How to Download and Install on Android

-

If you are a fan of Grand Theft Auto (GTA) games, you probably know that GTA: Liberty City Stories is one of the best titles in the series. It was originally released in 2005 for PlayStation Portable (PSP) and later ported to PlayStation 2 (PS2) in 2006. It

is a spin-off of GTA III, set in the same fictional city of Liberty City, but three years earlier. You play as Toni Cipriani, a former hitman for the Leone crime family, who returns to the city after a four-year absence. You will have to deal with rival gangs, corrupt politicians, and the FBI as you rise through the ranks of the mafia.

-

But what if you want to play GTA: Liberty City Stories on your Android device? Well, you are in luck, because Rockstar Games has remastered the game for mobile platforms in 2016. The Android version of GTA: Liberty City Stories has improved graphics, touch controls, and cross-platform saves. It also has some exclusive features that are not available on the original versions, such as new high-resolution textures, real-time lighting and shadows, enhanced draw distance, and rebalanced gameplay.

-

gta liberty city stories apk and obb


Download Zip --->>> https://urlin.us/2uSUkp



-

GTA: Liberty City Stories is one of the best GTA games on mobile devices, because it has a shorter and more focused story than other GTA games, making it ideal for playing on the go. It also has a lot of content and variety, such as side missions, mini-games, hidden packages, rampages, and more. You can explore the city on foot or by using various vehicles, such as cars, bikes, boats, and helicopters. You can also customize your character's appearance and weapons.

-

Requirements: What you need to download and install GTA: Liberty City Stories APK and OBB files

-

Before you download and install GTA: Liberty City Stories APK and OBB files on your Android device, you need to make sure that your device meets the minimum and recommended specifications for running the game smoothly. Here are the requirements:

- - - - - - - - - - - - - - - - - - - - - - - - - -
| Minimum | Recommended |
| --- | --- |
| Android 4.0 or higher | Android 5.0 or higher |
| 1 GB of RAM | 2 GB of RAM or more |
| 2 GB of free storage space | 4 GB of free storage space or more |
| Dual-core processor | Quad-core processor or better |
| Adreno 220 GPU or equivalent | Adreno 330 GPU or better |
-

You also need to be aware of the risks of downloading APK and OBB files from untrusted sources. APK and OBB files are the files that contain the game's data and resources. APK stands for Android Package Kit, and OBB stands for Opaque Binary Blob. If you download these files from unknown or malicious websites, you may expose your device to viruses, malware, spyware, or other harmful software. You may also compromise your personal information, such as your contacts, photos, messages, or passwords.

-

To avoid these risks, you should only download GTA: Liberty City Stories APK and OBB files from a reliable source that has positive reviews and ratings from other users. One such source is APKCombo, which is a website that provides safe and fast downloads of APK and OBB files for various Android games and apps. You can download GTA: Liberty City Stories APK and OBB files from APKCombo by following this link.

Steps: How to download and install GTA: Liberty City Stories APK and OBB files on your Android device

-

Now that you have checked the requirements and found a reliable source to download GTA: Liberty City Stories APK and OBB files, you are ready to install the game on your Android device. Here are the steps you need to follow:

-

Step 1: Download GTA: Liberty City Stories APK and OBB files from APKCombo

-

The first step is to download GTA: Liberty City Stories APK and OBB files from APKCombo. You can do this by using your web browser on your Android device or on your computer. Here are the screenshots of the process:

-

Screenshot of APKCombo website with GTA: Liberty City Stories download link

-

This is the APKCombo website with the GTA: Liberty City Stories download link. You can see the game's icon, name, rating, size, and version. You can also see the green Download button that you need to tap or click.

-

Screenshot of APKCombo download page with GTA: Liberty City Stories APK and OBB files

-


-

This is the APKCombo download page with the GTA: Liberty City Stories APK and OBB files. You can see the file names, sizes, and download links. You can also see the blue Download button that you need to tap or click for each file.

-

Screenshot of Android device with GTA: Liberty City Stories APK and OBB files downloaded

-

This is what your Android device will look like after you have downloaded the GTA: Liberty City Stories APK and OBB files. You can see the file icons, names, and sizes in your notification bar or in your file manager app.

-

Step 2: Install GTA: Liberty City Stories APK file on your Android device

-

The next step is to install GTA: Liberty City Stories APK file on your Android device. You can do this by using your file manager app or by tapping on the notification of the downloaded file. Here are the screenshots of the process:

-

Screenshot of Android device with GTA: Liberty City Stories APK file ready to install

-

This is what your Android device will look like when you are ready to install the GTA: Liberty City Stories APK file. You can see the file icon, name, and size. You can also see the Install button that you need to tap.

-

Screenshot of Android device with security warning for installing unknown apps

-

This is a security warning that may appear on your Android device when you try to install GTA: Liberty City Stories APK file. It tells you that installing unknown apps may harm your device. You can ignore this warning by tapping on Settings.

-

Screenshot of Android device with option to allow installing unknown apps from browser

-

This is an option that may appear on your Android device after you tap on Settings. It allows you to enable installing unknown apps from your web browser. You can enable this option by toggling the switch.

-

Screenshot of Android device with GTA: Liberty City Stories APK file installing

-

This is what your Android device will look like while the GTA: Liberty City Stories APK file is installing. You can see the progress bar and the app permissions. You can also see the Cancel button that you can tap if you want to stop the installation.

-

Screenshot of Android device with GTA: Liberty City Stories APK file installed

-

This is what your Android device will look like once the GTA: Liberty City Stories APK file is installed. You can see the app icon, name, and version. You can also see the Done and Open buttons that you can tap.

Step 3: Copy GTA: Liberty City Stories OBB file to your Android device

-

The final step is to copy GTA: Liberty City Stories OBB file to your Android device. You can do this by using your file manager app or by connecting your device to your computer via USB cable. Here are the screenshots of the process:

-

Screenshot of Android device with GTA: Liberty City Stories OBB file ready to copy

-

This is what your Android device will look like when you are ready to copy the GTA: Liberty City Stories OBB file. You can see the file icon, name, and size. You can also see the Copy button that you need to tap.

-

Screenshot of Android device with option to select destination folder for GTA: Liberty City Stories OBB file

-

This is an option that may appear on your Android device after you tap on Copy. It allows you to select the destination folder for GTA: Liberty City Stories OBB file. You need to navigate to the folder named Android, then OBB, then com.rockstargames.gtalcs. If you don't see this folder, you need to create it manually.

-

Screenshot of Android device with GTA: Liberty City Stories OBB file copied to destination folder

-

This is what your Android device will look like once the GTA: Liberty City Stories OBB file is copied to the destination folder. You can see the file icon, name, and size in the folder named com.rockstargames.gtalcs.
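If your device has USB debugging enabled, the same install-and-copy procedure can also be driven from your computer with adb. The sketch below shows the idea in Python; the local file names are assumptions, while the package folder com.rockstargames.gtalcs is the one named above.

```python
# Install the APK and push the OBB over USB with adb. Local file names are
# assumptions; the OBB folder matches the package name used in this guide.
import subprocess

apk = "gta-lcs.apk"                        # assumed local APK file
obb = "main.com.rockstargames.gtalcs.obb"  # assumed local OBB file
obb_dir = "/sdcard/Android/obb/com.rockstargames.gtalcs/"

subprocess.run(["adb", "install", apk], check=True)
subprocess.run(["adb", "shell", "mkdir", "-p", obb_dir], check=True)
subprocess.run(["adb", "push", obb, obb_dir], check=True)
print("APK installed and OBB copied.")
```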

-

Tips: How to optimize your gameplay experience and avoid common issues

-

Congratulations! You have successfully downloaded and installed GTA: Liberty City Stories APK and OBB files on your Android device. You are now ready to enjoy the game on your mobile screen. But before you start playing, here are some tips on how to optimize your gameplay experience and avoid common issues:

-

Tip 1: Adjust the game's settings, controls, graphics, sound, and language to suit your preferences

-

One of the first things you should do when you launch GTA: Liberty City Stories on your Android device is to adjust the game's settings, controls, graphics, sound, and language to suit your preferences. You can do this by tapping on the Menu button on the top left corner of the screen, then tapping on Settings. Here are some screenshots of the options you can customize:

-

Screenshot of GTA: Liberty City Stories settings menu with options for controls

-

This is the settings menu with options for controls. You can change the control layout, sensitivity, vibration, and auto-aim.

-

Screenshot of GTA: Liberty City Stories settings menu with options for graphics

-

This is the settings menu with options for graphics. You can change the resolution, draw distance, shadows, reflections, and frame limiter.

-

Screenshot of GTA: Liberty City Stories settings menu with options for sound

-

This is the settings menu with options for sound. You can change the volume, subtitles, radio station, and radio mode.

-

Screenshot of GTA: Liberty City Stories settings menu with options for language

-

This is the settings menu with options for language. You can change the language of the game's text and voice.

Tip 2: Save your progress, access cheats, use mods, and connect with other players online

-

Another thing you should do when you play GTA: Liberty City Stories on your Android device is to save your progress, access cheats, use mods, and connect with other players online. You can do this by using the following features:

-
    -
  • Save your progress: You can save your progress by visiting any of the safe houses marked on the map. You can also use the cloud save feature to sync your progress across different devices. You can access the cloud save feature by tapping on the Menu button on the top left corner of the screen, then tapping on Cloud Save.
  • Access cheats: You can access cheats by using the cheat device app that is included in the game. You can launch the cheat device app by tapping on the Menu button on the top left corner of the screen, then tapping on Cheat Device. You can then select from various categories of cheats, such as weapons, health, armor, money, wanted level, weather, time, and more.
  • Use mods: You can use mods to enhance or change your gameplay experience. Mods are modifications made by other users that add new features, content, or graphics to the game. You can download and install mods from various websites, such as GTAInside or GTA5-Mods. You can then activate or deactivate mods by using the mod manager app that is included in the game. You can launch the mod manager app by tapping on the Menu button on the top left corner of the screen, then tapping on Mod Manager.
  • Connect with other players online: You can connect with other players online by using the multiplayer mode that is included in the game. You can launch the multiplayer mode by tapping on the Menu button on the top left corner of the screen, then tapping on Multiplayer. You can then choose from various modes of multiplayer, such as deathmatch, team deathmatch, capture the flag, and more. You can also chat with other players and invite them to join your game.
-

Tip 3: Fix common issues that may occur while playing GTA: Liberty City Stories on your Android device

-

The last thing you should do when you play GTA: Liberty City Stories on your Android device is to fix common issues that may occur while playing. Some of these issues are:

-
    -
  • Game crashing or freezing: This may happen if your device does not meet the minimum or recommended specifications for running the game smoothly. To fix this issue, try lowering the game's graphics settings, closing other apps running in the background, clearing your device's cache and memory, or restarting your device.
  • Game not launching or loading: This may happen if you have not installed the GTA: Liberty City Stories APK and OBB files correctly, or if you have corrupted or missing files. To fix this issue, try reinstalling the APK and OBB files from a trusted source, or check that you have enough storage space on your device.
  • Game not saving or syncing: This may happen if you have not enabled the cloud save feature or if you have a poor internet connection. To fix this issue, enable the cloud save feature by tapping on the Menu button in the top left corner of the screen, then tapping on Cloud Save. Also try connecting to a stable and secure Wi-Fi network or using mobile data.
-

Conclusion: A summary of the main points and a call to action

-

In conclusion, GTA: Liberty City Stories is one of the best GTA games on mobile devices, because it has a shorter and more focused story than other GTA games, making it ideal for playing on the go. It also has a lot of content and variety, such as side missions, mini-games, hidden packages, rampages, and more. You can explore the city on foot or by using various vehicles, such as cars, bikes, boats, and helicopters. You can also customize your character's appearance and weapons.

-

To play GTA: Liberty City Stories on your Android device, you need to download and install GTA: Liberty City Stories APK and OBB files from a reliable source like APKCombo. You also need to make sure that your device meets the minimum and recommended specifications for running the game smoothly. You also need to follow some simple steps to install GTA: Liberty City Stories APK and OBB files on your Android device.

-

To optimize your gameplay experience and avoid common issues while playing GTA: Liberty City Stories on your Android device, you need to adjust the game's settings, controls, graphics, sound, and language to suit your preferences. You also need to save your progress, access cheats, use mods, and connect with other players online. You also need to fix some common issues that may occur while playing GTA: Liberty City Stories on your Android device.

-

If you are looking for a fun and immersive GTA game on your mobile device, you should definitely try out GTA: Liberty City Stories. It is one of the best GTA games on mobile devices, because it has a lot of features and content that will keep you entertained for hours. You can download and install GTA: Liberty City Stories APK and OBB files from APKCombo by following this link. You can also check out the official website of Rockstar Games for more information about the game.

-

So what are you waiting for? Download GTA: Liberty City Stories APK and OBB files now and enjoy the game on your Android device. And don't forget to share your feedback with us in the comments section below. We would love to hear from you!

-

FAQs

-

Here are some frequently asked questions about GTA: Liberty City Stories APK and OBB files:

-

Q: Is GTA: Liberty City Stories free to download and play?

-

A: No, GTA: Liberty City Stories is not free to download and play. It is a paid game that costs $6.99 on the Google Play Store. However, you can download and install GTA: Liberty City Stories APK and OBB files from APKCombo for free, as long as you have purchased the game legally from the Google Play Store.

-

Q: Is GTA: Liberty City Stories compatible with my Android device?

-

A: GTA: Liberty City Stories is compatible with most Android devices that run on Android 4.0 or higher. However, some devices may not be able to run the game smoothly due to their specifications or performance. You can check the minimum and recommended specifications for running the game smoothly in the Requirements section of this article.

-

Q: How much storage space do I need to download and install GTA: Liberty City Stories APK and OBB files?

-

A: You need at least 2 GB of free storage space on your Android device to download and install GTA: Liberty City Stories APK and OBB files. However, we recommend that you have at least 4 GB of free storage space or more to avoid any issues while playing the game.

-

Q: How long does it take to download and install GTA: Liberty City Stories APK and OBB files?

-

A: The time it takes to download and install GTA: Liberty City Stories APK and OBB files depends on your internet speed and your device's performance. On average, it may take around 10 to 15 minutes to complete the process.

-

Q: Can I play GTA: Liberty City Stories offline or online?

-

A: You can play GTA: Liberty City Stories offline or online. You can play the game offline without an internet connection, as long as you have downloaded and installed GTA: Liberty City Stories APK and OBB files correctly. You can also play the game online with other players by using the multiplayer mode that is included in the game.

-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/FateGrand Order PC How to Install and Play the Epic RPG on Your Computer.md b/spaces/1phancelerku/anime-remove-background/FateGrand Order PC How to Install and Play the Epic RPG on Your Computer.md
deleted file mode 100644
index ee04004d932786e9a45fe96c0e3627ffeee1d6eb..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/FateGrand Order PC How to Install and Play the Epic RPG on Your Computer.md
+++ /dev/null
@@ -1,235 +0,0 @@
-

How to Download and Play Fate/Grand Order on PC

-

Do you love playing Fate/Grand Order on your mobile device but wish you could enjoy it on a bigger screen and with better performance? If so, you're not alone. Many fans of this popular mobile game based on the Fate franchise are looking for ways to play it on their computers. Fortunately, there is a simple solution: using an emulator.

-

download fate grand order computer


DOWNLOAD - https://jinyurl.com/2uNK7w



-

In this article, we will show you how to download and play Fate/Grand Order on PC using an emulator. We will also explain what Fate/Grand Order is, why you should play it on PC, how to choose the best emulator for it, how to optimize your gameplay experience on PC, and how to troubleshoot common issues. By the end of this article, you will be ready to enjoy this amazing game on your PC with ease.

-

What is Fate/Grand Order?

-

A brief introduction to the game and its features

-

Fate/Grand Order is a turn-based role-playing game with some visual novel elements. The player takes the role of a "Master" and commands a group of individuals called "Servants", who are typically historical, literary, and mythological figures from various cultures. The game is set in a world where human history is about to be erased by a mysterious force called "The Singularities". The player's mission is to travel back in time and restore human history by defeating enemies and solving the mystery of The Singularities.

-

The game features millions of words of original story, with an impressive cast of voice actors and a stunning soundtrack. The game also boasts hundreds of Servants to collect and customize, each with their own skills, stats, and Noble Phantasms (special attacks). The game is constantly updated with new events, stories, and Servants to keep the players engaged.

-

Why play Fate/Grand Order on PC?

-

Benefits of playing on PC vs mobile

-

While Fate/Grand Order is designed for mobile devices, there are many reasons why you might want to play it on PC instead. Here are some of the benefits of playing on PC:

-
    -
  • You can enjoy the game on a larger screen and with higher resolution, which makes the graphics and animations more immersive and detailed.
  • -
  • You can use your keyboard and mouse to control the game, which can be more comfortable and precise than tapping on a touchscreen.
  • -
  • You can avoid draining your battery or overheating your device, which can happen when playing for long periods of time on mobile.
  • -
  • You can save your phone's storage space and data usage, which can be limited or expensive depending on your plan.
  • -
  • You can access the game from multiple devices, such as your laptop or desktop, without having to transfer your account or data.
  • -
-

Requirements for playing on PC

-

To play Fate/Grand Order on PC, you will need two things: a computer and an emulator. A computer is the device that you will use to run the game, and an emulator is the software that will allow you to run the game as if it were a mobile app. Here are the minimum requirements for both:

- - - - - - - - - - - - - - - - - - - - - - - - - -
| Computer | Emulator |
| --- | --- |
| Windows 7 or higher | Android 5.0 or higher |
| Intel or AMD processor | At least 2 GB of RAM |
| At least 4 GB of RAM | At least 5 GB of disk space |
| At least 5 GB of disk space | A stable internet connection |
| A stable internet connection | A Google account (optional but recommended) |
-

If your computer meets these requirements, you should be able to play Fate/Grand Order on PC without any problems. However, if you want to improve your performance and experience, you might want to upgrade your hardware or software accordingly.
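If you would rather not dig through system settings, a few lines of Python can report the RAM and free-disk figures to compare against the table above. This is only a convenience sketch; it assumes the third-party psutil package is installed (pip install psutil).

```python
# Quick self-check against the minimum specs in the table above.
# Assumes the third-party psutil package (pip install psutil).
import shutil
import psutil

ram_gb = psutil.virtual_memory().total / 2**30  # installed RAM in GiB
free_gb = shutil.disk_usage(".").free / 2**30   # free space on the current drive in GiB

print(f"RAM: {ram_gb:.1f} GB (at least 4 GB recommended)")
print(f"Free disk: {free_gb:.1f} GB (at least 5 GB recommended)")
```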

-


-

How to install an emulator for Fate/Grand Order

-

What is an emulator and how does it work?

-

An emulator is a program that mimics the functions of another device or system. In this case, an emulator will allow you to run Android apps on your PC as if they were native applications. An emulator will create a virtual environment on your PC that will simulate the Android operating system and its features. This way, you can download and install Android apps from the Google Play Store or other sources and run them on your PC with ease.
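A concrete way to see this "virtual Android device" in action is to query the emulator with ADB, Android's standard debugging tool. The sketch below is optional and assumes adb is installed and on your PATH, and that ADB debugging has been enabled in the emulator's settings; neither is required to play the game.

```python
# List the Android devices and emulators currently visible to ADB.
# Assumes adb is installed and on PATH, and the emulator has ADB enabled.
import subprocess

result = subprocess.run(["adb", "devices"], capture_output=True, text=True, check=True)
print(result.stdout)  # each running emulator appears as a serial plus "device"
```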

-

Choosing the best emulator for Fate/Grand Order

-

Comparing different emulators and their pros and cons

-

There are many emulators available for PC that can run Android apps, but not all of them are suitable for Fate/Grand Order. Some emulators may have compatibility issues, performance problems, security risks, or limited features that can affect your gameplay experience. Therefore, you need to choose an emulator that is reliable, fast, safe, and feature-rich for Fate/Grand Order.

-

To help you choose the best emulator for Fate/Grand Order, we have compared some of the most popular emulators based on their pros and cons. Here is a table that summarizes our findings:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| Emulator | Pros | Cons |
| --- | --- | --- |
| BlueStacks | The most popular and trusted emulator for Android games<br>Supports high-definition graphics and smooth gameplay<br>Offers many features to enhance your game such as keyboard and mouse controls, multi-instance, sync, real-time translation, etc.<br>Compatible with most Android apps and games<br>Easy to install and use<br>Free to download and play | Requires a relatively high-end PC to run smoothly<br>May have some ads or promotions |
| NoxPlayer | A fast and stable emulator for Android games<br>Supports high-resolution graphics and smooth gameplay<br>Offers many features to enhance your game such as keyboard and mouse controls, multi-instance, macro recorder, etc.<br>Compatible with most Android apps and games<br>Easy to install and use<br>Free to download and play | May have some compatibility issues with some games<br>May have some ads or promotions<br>May have some security risks or malware |
| LDPlayer | A lightweight and fast emulator for Android games<br>Supports high-performance graphics and smooth gameplay<br>Offers some features to enhance your game such as keyboard and mouse controls, multi-instance, etc.<br>Compatible with most Android apps and games<br>Easy to install and use<br>Free to download and play | May have some compatibility issues with some games<br>May have some ads or promotions<br>May have some security risks or malware |
| MEmu | A powerful and flexible emulator for Android games<br>Supports high-quality graphics and smooth gameplay<br>Offers many features to enhance your game such as keyboard and mouse controls, multi-instance, macro recorder, etc.<br>Compatible with most Android apps and games<br>Easy to install and use<br>Free to download and play | May have some compatibility issues with some games<br>May have some ads or promotions<br>May have some security risks or malware |
| Genymotion | A professional and advanced emulator for Android developers and testers<br>Supports high-performance graphics and smooth gameplay<br>Offers many features to customize your game such as screen size, resolution, device model, etc.<br>Compatible with most Android apps and games<br>Easy to install and use | Requires a paid subscription for personal or commercial use<br>May have some compatibility issues with some games |
-

Recommending BlueStacks as the best emulator for Fate/Grand Order

-

Based on our comparison, we recommend BlueStacks as the best emulator for Fate/Grand Order. BlueStacks is the most popular and trusted emulator for Android games, with over 500 million users worldwide. It supports high-definition graphics and smooth gameplay, and offers many features to enhance your game such as keyboard and mouse controls, multi-instance, sync, real-time translation, etc. It is also compatible with most Android apps and games, including Fate/Grand Order. It is easy to install and use, and free to download and play.

-

BlueStacks is the emulator that we will use in this article to show you how to download and play Fate/Grand Order on PC. However, you can choose any other emulator that suits your preferences and needs. Just make sure that the emulator is reliable, fast, safe, and feature-rich for Fate/Grand Order.

-

How to download and install Fate/Grand Order on BlueStacks

-

Step-by-step guide for downloading and installing BlueStacks on PC

-

To download and install BlueStacks on your PC, follow these steps:

-
    -
  1. Go to the official website of BlueStacks at https://www.bluestacks.com/ and click on the "Download BlueStacks" button.
  2. Wait for the download to finish and then run the installer file.
  3. Follow the instructions on the screen to complete the installation process.
  4. Launch BlueStacks on your PC and sign in with your Google account (optional but recommended).
  5. Congratulations! You have successfully installed BlueStacks on your PC.
-

Step-by-step guide for downloading and installing Fate/Grand Order on BlueStacks

-

To download and install Fate/Grand Order on BlueStacks, follow these steps:

-
    -
  1. Open BlueStacks on your PC and go to the home screen.
  2. Click on the "Game Center" tab and search for "Fate/Grand Order" in the search bar.
  3. Select the game from the results and click on the "Install" button.
  4. Wait for the download and installation to finish.
  5. Click on the "Open" button to launch the game.
  6. Congratulations! You have successfully installed Fate/Grand Order on BlueStacks.
-
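As an optional alternative to the Game Center, most emulators, BlueStacks included, can also install a local APK file, either by dragging it onto the emulator window or over ADB. Below is a hedged sketch of the ADB route; fgo.apk is a placeholder file name, and it assumes adb is connected to the emulator as in the earlier example.

```python
# Sideload a local APK into the running emulator over ADB.
# "fgo.apk" is a placeholder path; -r replaces the app if already installed.
import subprocess

subprocess.run(["adb", "install", "-r", "fgo.apk"], check=True)
```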

How to transfer your account from mobile to PC

-

If you already have an account on Fate/Grand Order on your mobile device, you can transfer it to your PC using a transfer code. A transfer code is a unique code, generated from your game settings, that allows you to access your account from another device. To transfer your account from mobile to PC, follow these steps:

- On your mobile device, open Fate/Grand Order and go to the menu.
- Tap on "My Room" and then on "Issue Transfer Number".
- Enter a password of your choice and tap on "Confirm".
- You will see a transfer code on the screen. Write it down or take a screenshot of it. Do not share it with anyone else.
- On your PC, open Fate/Grand Order on BlueStacks and go to the title screen.
- Tap on "Data Transfer" and enter your transfer code and password.
- Tap on "Confirm" and wait for the data transfer to complete.
- Congratulations! You have successfully transferred your account from mobile to PC.

-

How to optimize your gameplay experience on PC

-

How to use BlueStacks features to enhance your game

-

Using keyboard and mouse controls

-

One of the advantages of playing Fate/Grand Order on PC is that you can use your keyboard and mouse to control the game, which can be more comfortable and precise than tapping on a touchscreen. BlueStacks allows you to customize your keyboard and mouse controls according to your preferences. To use keyboard and mouse controls, follow these steps:

-
    -
  1. Open Fate/Grand Order on BlueStacks and go to the game screen.
  2. Click on the keyboard icon on the right side of the screen.
  3. You will see a list of predefined keyboard and mouse controls for the game. You can edit them by clicking on the edit icon or create new ones by clicking on the plus icon.
  4. You can assign keys or mouse buttons to perform actions such as moving, attacking, selecting cards, using skills, etc.
  5. You can also adjust the sensitivity, opacity, size, and position of the controls.
  6. When you are done, click on "Save" and close the window.
  7. You can now use your keyboard and mouse to play Fate/Grand Order on PC.
-

Using multi-instance and sync features

-

Another advantage of playing Fate/Grand Order on PC is that you can use multiple instances of BlueStacks to run multiple accounts or games at the same time. This can be useful if you want to switch between different accounts or regions, or if you want to play other games while waiting for your stamina to refill. BlueStacks also allows you to sync your actions across multiple instances, which can be useful if you want to perform the same tasks or commands on different accounts or games. To use multi-instance and sync features, follow these steps:

-
    -
  1. Open BlueStacks and go to the home screen.
  2. Click on the multi-instance icon on the right side of the screen.
  3. You will see a list of existing instances; you can create new ones by clicking on the plus icon.
  4. You can choose between fresh instances and clone instances. Fresh instances are new instances that start from scratch, while clone instances are copies of existing instances that share the same data and settings.
  5. You can also choose between 32-bit and 64-bit instances depending on your system and game requirements.
  6. When you have created or selected an instance, click on "Start" to launch it.
  7. You can now run multiple instances of BlueStacks at the same time. You can switch between them by clicking on their tabs or windows.
  8. If you want to sync your actions across multiple instances, click on the sync icon on the right side of the screen.
  9. You will see a list of available instances to sync with. Select the ones you want and click on "Start Syncing".
  10. You can now perform actions on one instance and see them replicated on the others. You can stop syncing by clicking on "Stop Syncing".
-

Using real-time translation and high definition graphics features

-

A final advantage of playing Fate/Grand Order on PC is that you can use real-time translation and high definition graphics features to enhance your game. Real-time translation allows you to translate any text in the game into your preferred language, which can be useful if you want to play in different regions or understand the story better. High definition graphics allows you to adjust the graphics quality and resolution of the game, which can make it more immersive and detailed. To use real-time translation and high definition graphics features, follow these steps:

-
    -
  1. Open Fate/Grand Order on BlueStacks and go to the game screen.
  2. Click on the settings icon on the right side of the screen.
  3. To use real-time translation, click on the translation icon and select your preferred language. You can also toggle the translation mode to auto or manual, and adjust the translation speed and accuracy.
  4. To use high definition graphics, click on the graphics icon and select your preferred graphics quality and resolution. You can also enable or disable anti-aliasing, FPS, and V-sync.
  5. When you are done, click on "Save" and close the window.
  6. You can now enjoy Fate/Grand Order on PC with real-time translation and high definition graphics.
-

Conclusion

-

Summarizing the main points of the article

-

In this article, we have shown you how to download and play Fate/Grand Order on PC using an emulator. We have also explained what is Fate/Grand Order, why you should play it on PC, how to choose the best emulator for it, how to optimize your gameplay experience on PC, and how to troubleshoot common issues. By following our guide, you will be able to enjoy this amazing game on your PC with ease.

-

Providing some tips and tricks for playing Fate/Grand Order on PC

-

Before we end this article, we would like to share some tips and tricks for playing Fate/Grand Order on PC that can help you improve your game and have more fun. Here are some of them:

-
    -
  • Use the game's wiki or guides to learn more about the game's mechanics, characters, events, and strategies.
  • Join the game's community on social media or forums to interact with other players, get news and updates, and participate in contests and giveaways.
  • Back up your account data regularly using a transfer code or a bind code to avoid losing your progress or data.
  • Use the game's support system or customer service to report any bugs, errors, or issues that you encounter in the game or the emulator.
  • Have fun and enjoy the game!
-

A final word

-

We hope that this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you. Thank you for reading and happy gaming!

-

Frequently Asked Questions

-

Is Fate/Grand Order free to play?

-

Yes, Fate/Grand Order is free to download and play on both mobile devices and PC. However, the game has some optional in-app purchases that can enhance your gameplay experience or speed up your progress. You can buy items such as Saint Quartz, Golden Apples, Summon Tickets, etc. using real money. However, these items are not necessary to enjoy the game and you can earn them for free by playing the game regularly.

-

Is Fate/Grand Order available in my region?

-

Fate/Grand Order is currently available in four regions: Japan, North America, China, and Korea. Each region has its own server, language, and content. You can choose which region you want to play in by downloading the corresponding version of the game from the Google Play Store or other sources. However, you cannot transfer your account or data between different regions, so make sure you choose wisely.

-

How can I get more Servants in Fate/Grand Order?

-

You can get more Servants in Fate/Grand Order by using the Summon system. The Summon system allows you to spend Saint Quartz or Summon Tickets to randomly obtain Servants or Craft Essences (items that can boost your Servants' abilities). There are different types of Summons such as Story Summons, Event Summons, Limited Summons, etc. that offer different rates and pools of Servants and Craft Essences. You can also get some Servants for free by completing certain quests or events in the game.

-

How can I level up my Servants in Fate/Grand Order?

-

You can level up your Servants in Fate/Grand Order by using EXP Cards. EXP Cards are items that can grant a certain amount of experience points to your Servants when used. You can obtain EXP Cards by completing Daily Quests or Events in the game. You can also use other Servants or Craft Essences as materials to level up your Servants, but this will consume them permanently. You can also increase your Servants' level cap by using Ascension Materials or Grails.

-

How can I improve my combat skills in Fate/Grand Order?

-

You can improve your combat skills in Fate/Grand Order by learning the basics of the combat system and applying some strategies. The combat system is based on a turn-based card system where you have to select three cards from a pool of five cards every turn. The cards represent your Serv ants' actions, such as Buster (red), Arts (blue), or Quick (green). The cards have different effects and combinations that can affect your damage, NP gauge, critical stars, etc. You also have to consider your Servants' skills, Noble Phantasms, class advantages, and team composition when fighting. You can learn more about the combat system and strategies by reading the game's wiki or guides, or by watching some gameplay videos or tutorials online.

-

-

That's it for this article. We hope you now know how to download and play Fate/Grand Order on PC using an emulator, and that you found the guide useful. Happy gaming!

-
-
\ No newline at end of file diff --git a/spaces/2-2/blockchain.ai/README.md b/spaces/2-2/blockchain.ai/README.md deleted file mode 100644 index 0ab6c87ab6775c175ff291e1d636b4972d7b11d9..0000000000000000000000000000000000000000 --- a/spaces/2-2/blockchain.ai/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Blockchain.ai -emoji: 👀 -colorFrom: blue -colorTo: gray -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/2023Liu2023/bingo/src/pages/api/healthz.ts b/spaces/2023Liu2023/bingo/src/pages/api/healthz.ts deleted file mode 100644 index f6ae44ff0fd66ccd3f7feaa550025fbf2a83bf77..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/src/pages/api/healthz.ts +++ /dev/null @@ -1,7 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - res.status(200).end('ok') -} diff --git a/spaces/656-156/Real-CUGAN/app.py b/spaces/656-156/Real-CUGAN/app.py deleted file mode 100644 index 2439c5cec6b61e8a517f957daf710cbb6b5c3cf6..0000000000000000000000000000000000000000 --- a/spaces/656-156/Real-CUGAN/app.py +++ /dev/null @@ -1,62 +0,0 @@ -from upcunet_v3 import RealWaifuUpScaler -import gradio as gr -import time -import logging -import os -from PIL import ImageOps -import numpy as np -import math - - -def greet(input_img, input_model_name, input_tile_mode): - # if input_img.size[0] * input_img.size[1] > 256 * 256: - # y = int(math.sqrt(256*256/input_img.size[0]*input_img.size[1])) - # x = int(input_img.size[0]/input_img.size[1]*y) - # input_img = ImageOps.fit(input_img, (x, y)) - input_img = np.array(input_img) - if input_model_name not in model_cache: - t1 = time.time() - upscaler = RealWaifuUpScaler(input_model_name[2], ModelPath + input_model_name, half=False, device="cpu") - t2 = time.time() - logger.info(f'load model time, {t2 - t1}') - model_cache[input_model_name] = upscaler - else: - upscaler = model_cache[input_model_name] - logger.info(f'load model from cache') - - start = time.time() - result = upscaler(input_img, tile_mode=input_tile_mode) - end = time.time() - logger.info(f'input_model_name, {input_model_name}') - logger.info(f'input_tile_mode, {input_tile_mode}') - logger.info(f'input shape, {input_img.shape}') - logger.info(f'output shape, {result.shape}') - logger.info(f'speed time, {end - start}') - return result - - -if __name__ == '__main__': - logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s") - logger = logging.getLogger() - - ModelPath = "weights_v3/" - model_cache = {} - - input_model_name = gr.inputs.Dropdown(os.listdir(ModelPath), default="up2x-latest-denoise2x.pth", label='选择model') - input_tile_mode = gr.inputs.Dropdown([0, 1, 2, 3, 4], default=2, label='选择tile_mode') - input_img = gr.inputs.Image(label='image', type='pil') - - inputs = [input_img, input_model_name, input_tile_mode] - outputs = "image" - iface = gr.Interface(fn=greet, - inputs=inputs, - outputs=outputs, - allow_screenshot=False, - allow_flagging='never', - examples=[['test-img.jpg', "up2x-latest-denoise2x.pth", 2]], - article='[https://github.com/bilibili/ailab/tree/main/Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN)
' - '感谢b站开源的项目,图片过大会导致内存不足,所有我将图片裁剪小,想体验大图片的效果请自行前往上面的链接。
' - '修改bbb' - 'The large image will lead to memory limit exceeded. So I crop and resize image. ' - 'If you want to experience the large image, please go to the link above.') - iface.launch() diff --git a/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/spec_utils.py b/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/spec_utils.py deleted file mode 100644 index a9634fd51ff47bf90211839231774719154c37cf..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/spec_utils.py +++ /dev/null @@ -1,672 +0,0 @@ -import hashlib -import json -import math -import os - -import librosa -import numpy as np -import soundfile as sf -from tqdm import tqdm - - -def crop_center(h1, h2): - h1_shape = h1.size() - h2_shape = h2.size() - - if h1_shape[3] == h2_shape[3]: - return h1 - elif h1_shape[3] < h2_shape[3]: - raise ValueError("h1_shape[3] must be greater than h2_shape[3]") - - # s_freq = (h2_shape[2] - h1_shape[2]) // 2 - # e_freq = s_freq + h1_shape[2] - s_time = (h1_shape[3] - h2_shape[3]) // 2 - e_time = s_time + h2_shape[3] - h1 = h1[:, :, :, s_time:e_time] - - return h1 - - -def wave_to_spectrogram( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def wave_to_spectrogram_mt( - wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False -): - import threading - - if reverse: - wave_left = np.flip(np.asfortranarray(wave[0])) - wave_right = np.flip(np.asfortranarray(wave[1])) - elif mid_side: - wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) - elif mid_side_b2: - wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5)) - wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5)) - else: - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - - def run_thread(**kwargs): - global spec_left - spec_left = librosa.stft(**kwargs) - - thread = threading.Thread( - target=run_thread, - kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length}, - ) - thread.start() - spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) - thread.join() - - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def combine_spectrograms(specs, mp): - l = min([specs[i].shape[2] for i in specs]) - spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64) - offset = 0 - bands_n = len(mp.param["band"]) - - for d in range(1, bands_n + 1): - h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"] - spec_c[:, offset : offset + h, :l] = specs[d][ - :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l - ] - offset += h - - if offset > mp.param["bins"]: - raise ValueError("Too much bins") - - # lowpass fiter - if ( - 
mp.param["pre_filter_start"] > 0 - ): # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']: - if bands_n == 1: - spec_c = fft_lp_filter( - spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"] - ) - else: - gp = 1 - for b in range( - mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"] - ): - g = math.pow( - 10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0 - ) - gp = g - spec_c[:, b, :] *= g - - return np.asfortranarray(spec_c) - - -def spectrogram_to_image(spec, mode="magnitude"): - if mode == "magnitude": - if np.iscomplexobj(spec): - y = np.abs(spec) - else: - y = spec - y = np.log10(y**2 + 1e-8) - elif mode == "phase": - if np.iscomplexobj(spec): - y = np.angle(spec) - else: - y = spec - - y -= y.min() - y *= 255 / y.max() - img = np.uint8(y) - - if y.ndim == 3: - img = img.transpose(1, 2, 0) - img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2) - - return img - - -def reduce_vocal_aggressively(X, y, softmask): - v = X - y - y_mag_tmp = np.abs(y) - v_mag_tmp = np.abs(v) - - v_mask = v_mag_tmp > y_mag_tmp - y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf) - - return y_mag * np.exp(1.0j * np.angle(y)) - - -def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32): - if min_range < fade_size * 2: - raise ValueError("min_range must be >= fade_area * 2") - - mag = mag.copy() - - idx = np.where(ref.mean(axis=(0, 1)) < thres)[0] - starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0]) - ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1]) - uninformative = np.where(ends - starts > min_range)[0] - if len(uninformative) > 0: - starts = starts[uninformative] - ends = ends[uninformative] - old_e = None - for s, e in zip(starts, ends): - if old_e is not None and s - old_e < fade_size: - s = old_e - fade_size * 2 - - if s != 0: - weight = np.linspace(0, 1, fade_size) - mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size] - else: - s -= fade_size - - if e != mag.shape[2]: - weight = np.linspace(1, 0, fade_size) - mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e] - else: - e += fade_size - - mag[:, :, s + fade_size : e - fade_size] += ref[ - :, :, s + fade_size : e - fade_size - ] - old_e = e - - return mag - - -def align_wave_head_and_tail(a, b): - l = min([a[0].size, b[0].size]) - - return a[:l, :l], b[:l, :l] - - -def cache_or_load(mix_path, inst_path, mp): - mix_basename = os.path.splitext(os.path.basename(mix_path))[0] - inst_basename = os.path.splitext(os.path.basename(inst_path))[0] - - cache_dir = "mph{}".format( - hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest() - ) - mix_cache_dir = os.path.join("cache", cache_dir) - inst_cache_dir = os.path.join("cache", cache_dir) - - os.makedirs(mix_cache_dir, exist_ok=True) - os.makedirs(inst_cache_dir, exist_ok=True) - - mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy") - inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy") - - if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path): - X_spec_m = np.load(mix_cache_path) - y_spec_m = np.load(inst_cache_path) - else: - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - X_wave[d], _ = librosa.load( - mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"] - ) - y_wave[d], _ = librosa.load( - inst_path, - bp["sr"], - False, - 
dtype=np.float32, - res_type=bp["res_type"], - ) - else: # lower bands - X_wave[d] = librosa.resample( - X_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - y_wave[d] = librosa.resample( - y_wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d]) - - X_spec_s[d] = wave_to_spectrogram( - X_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - y_spec_s[d] = wave_to_spectrogram( - y_wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - del X_wave, y_wave - - X_spec_m = combine_spectrograms(X_spec_s, mp) - y_spec_m = combine_spectrograms(y_spec_s, mp) - - if X_spec_m.shape != y_spec_m.shape: - raise ValueError("The combined spectrograms are different: " + mix_path) - - _, ext = os.path.splitext(mix_path) - - np.save(mix_cache_path, X_spec_m) - np.save(inst_cache_path, y_spec_m) - - return X_spec_m, y_spec_m - - -def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hop_length) - wave_right = librosa.istft(spec_right, hop_length=hop_length) - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2): - import threading - - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - def run_thread(**kwargs): - global wave_left - wave_left = librosa.istft(**kwargs) - - thread = threading.Thread( - target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length} - ) - thread.start() - wave_right = librosa.istft(spec_right, hop_length=hop_length) - thread.join() - - if reverse: - return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) - elif mid_side: - return np.asfortranarray( - [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)] - ) - elif mid_side_b2: - return np.asfortranarray( - [ - np.add(wave_right / 1.25, 0.4 * wave_left), - np.subtract(wave_left / 1.25, 0.4 * wave_right), - ] - ) - else: - return np.asfortranarray([wave_left, wave_right]) - - -def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None): - wave_band = {} - bands_n = len(mp.param["band"]) - offset = 0 - - for d in range(1, bands_n + 1): - bp = mp.param["band"][d] - spec_s = np.ndarray( - shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex - ) - h = bp["crop_stop"] - bp["crop_start"] - spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[ - :, offset : offset + h, : - ] - - offset += h - if d == bands_n: # higher - if extra_bins_h: # if --high_end_process bypass - max_bin = bp["n_fft"] // 2 - spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[ - :, :extra_bins_h, : - ] - if bp["hpf_start"] > 0: - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - if bands_n == 1: - wave = spectrogram_to_wave( - spec_s, - bp["hl"], - 
mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - else: - wave = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - else: - sr = mp.param["band"][d + 1]["sr"] - if d == 1: # lower - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave = librosa.resample( - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - bp["sr"], - sr, - res_type="sinc_fastest", - ) - else: # mid - spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1) - spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"]) - wave2 = np.add( - wave, - spectrogram_to_wave( - spec_s, - bp["hl"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ), - ) - # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest") - wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy") - - return wave.T - - -def fft_lp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop): - g -= 1 / (bin_stop - bin_start) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, bin_stop:, :] *= 0 - - return spec - - -def fft_hp_filter(spec, bin_start, bin_stop): - g = 1.0 - for b in range(bin_start, bin_stop, -1): - g -= 1 / (bin_start - bin_stop) - spec[:, b, :] = g * spec[:, b, :] - - spec[:, 0 : bin_stop + 1, :] *= 0 - - return spec - - -def mirroring(a, spec_m, input_high_end, mp): - if "mirroring" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mirror = mirror * np.exp(1.0j * np.angle(input_high_end)) - - return np.where( - np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror - ) - - if "mirroring2" == a: - mirror = np.flip( - np.abs( - spec_m[ - :, - mp.param["pre_filter_start"] - - 10 - - input_high_end.shape[1] : mp.param["pre_filter_start"] - - 10, - :, - ] - ), - 1, - ) - mi = np.multiply(mirror, input_high_end * 1.7) - - return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) - - -def ensembling(a, specs): - for i in range(1, len(specs)): - if i == 1: - spec = specs[0] - - ln = min([spec.shape[2], specs[i].shape[2]]) - spec = spec[:, :, :ln] - specs[i] = specs[i][:, :, :ln] - - if "min_mag" == a: - spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec) - if "max_mag" == a: - spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec) - - return spec - - -def stft(wave, nfft, hl): - wave_left = np.asfortranarray(wave[0]) - wave_right = np.asfortranarray(wave[1]) - spec_left = librosa.stft(wave_left, nfft, hop_length=hl) - spec_right = librosa.stft(wave_right, nfft, hop_length=hl) - spec = np.asfortranarray([spec_left, spec_right]) - - return spec - - -def istft(spec, hl): - spec_left = np.asfortranarray(spec[0]) - spec_right = np.asfortranarray(spec[1]) - - wave_left = librosa.istft(spec_left, hop_length=hl) - wave_right = librosa.istft(spec_right, hop_length=hl) - wave = np.asfortranarray([wave_left, wave_right]) - - -if __name__ == "__main__": - import argparse - import sys - import time - - import cv2 - from model_param_init import ModelParameters - - p = argparse.ArgumentParser() - p.add_argument( - "--algorithm", - "-a", - type=str, - choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"], - default="min_mag", - ) - p.add_argument( - "--model_params", - "-m", 
- type=str, - default=os.path.join("modelparams", "1band_sr44100_hl512.json"), - ) - p.add_argument("--output_name", "-o", type=str, default="output") - p.add_argument("--vocals_only", "-v", action="store_true") - p.add_argument("input", nargs="+") - args = p.parse_args() - - start_time = time.time() - - if args.algorithm.startswith("invert") and len(args.input) != 2: - raise ValueError("There should be two input files.") - - if not args.algorithm.startswith("invert") and len(args.input) < 2: - raise ValueError("There must be at least two input files.") - - wave, specs = {}, {} - mp = ModelParameters(args.model_params) - - for i in range(len(args.input)): - spec = {} - - for d in range(len(mp.param["band"]), 0, -1): - bp = mp.param["band"][d] - - if d == len(mp.param["band"]): # high-end band - wave[d], _ = librosa.load( - args.input[i], - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - - if len(wave[d].shape) == 1: # mono to stereo - wave[d] = np.array([wave[d], wave[d]]) - else: # lower bands - wave[d] = librosa.resample( - wave[d + 1], - mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - - spec[d] = wave_to_spectrogram( - wave[d], - bp["hl"], - bp["n_fft"], - mp.param["mid_side"], - mp.param["mid_side_b2"], - mp.param["reverse"], - ) - - specs[i] = combine_spectrograms(spec, mp) - - del wave - - if args.algorithm == "deep": - d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1]) - v_spec = d_spec - specs[1] - sf.write( - os.path.join("{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - - if args.algorithm.startswith("invert"): - ln = min([specs[0].shape[2], specs[1].shape[2]]) - specs[0] = specs[0][:, :, :ln] - specs[1] = specs[1][:, :, :ln] - - if "invert_p" == args.algorithm: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) - v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0])) - else: - specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2) - v_spec = specs[0] - specs[1] - - if not args.vocals_only: - X_mag = np.abs(specs[0]) - y_mag = np.abs(specs[1]) - v_mag = np.abs(v_spec) - - X_image = spectrogram_to_image(X_mag) - y_image = spectrogram_to_image(y_mag) - v_image = spectrogram_to_image(v_mag) - - cv2.imwrite("{}_X.png".format(args.output_name), X_image) - cv2.imwrite("{}_y.png".format(args.output_name), y_image) - cv2.imwrite("{}_v.png".format(args.output_name), v_image) - - sf.write( - "{}_X.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[0], mp), - mp.param["sr"], - ) - sf.write( - "{}_y.wav".format(args.output_name), - cmb_spectrogram_to_wave(specs[1], mp), - mp.param["sr"], - ) - - sf.write( - "{}_v.wav".format(args.output_name), - cmb_spectrogram_to_wave(v_spec, mp), - mp.param["sr"], - ) - else: - if not args.algorithm == "deep": - sf.write( - os.path.join("ensembled", "{}.wav".format(args.output_name)), - cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp), - mp.param["sr"], - ) - - if args.algorithm == "align": - trackalignment = [ - { - "file1": '"{}"'.format(args.input[0]), - "file2": '"{}"'.format(args.input[1]), - } - ] - - for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."): - os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}") - - # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1)) diff --git a/spaces/A666sxr/Genshin_TTS/stft_loss.py b/spaces/A666sxr/Genshin_TTS/stft_loss.py deleted file mode 100644 
index 08120d2a923b77b04ed231195bc8b5aa4568602b..0000000000000000000000000000000000000000 --- a/spaces/A666sxr/Genshin_TTS/stft_loss.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""STFT-based Loss modules.""" - -import torch -import torch.nn.functional as F - - -def stft(x, fft_size, hop_size, win_length, window): - """Perform STFT and convert to magnitude spectrogram. - Args: - x (Tensor): Input signal tensor (B, T). - fft_size (int): FFT size. - hop_size (int): Hop size. - win_length (int): Window length. - window (str): Window function type. - Returns: - Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1). - """ - x_stft = torch.stft(x, fft_size, hop_size, win_length, window.to(x.device)) - real = x_stft[..., 0] - imag = x_stft[..., 1] - - # NOTE(kan-bayashi): clamp is needed to avoid nan or inf - return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1) - - -class SpectralConvergengeLoss(torch.nn.Module): - """Spectral convergence loss module.""" - - def __init__(self): - """Initilize spectral convergence loss module.""" - super(SpectralConvergengeLoss, self).__init__() - - def forward(self, x_mag, y_mag): - """Calculate forward propagation. - Args: - x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). - y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). - Returns: - Tensor: Spectral convergence loss value. - """ - return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro") - - -class LogSTFTMagnitudeLoss(torch.nn.Module): - """Log STFT magnitude loss module.""" - - def __init__(self): - """Initilize los STFT magnitude loss module.""" - super(LogSTFTMagnitudeLoss, self).__init__() - - def forward(self, x_mag, y_mag): - """Calculate forward propagation. - Args: - x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins). - y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins). - Returns: - Tensor: Log STFT magnitude loss value. - """ - return F.l1_loss(torch.log(y_mag), torch.log(x_mag)) - - -class STFTLoss(torch.nn.Module): - """STFT loss module.""" - - def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"): - """Initialize STFT loss module.""" - super(STFTLoss, self).__init__() - self.fft_size = fft_size - self.shift_size = shift_size - self.win_length = win_length - self.window = getattr(torch, window)(win_length) - self.spectral_convergenge_loss = SpectralConvergengeLoss() - self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss() - - def forward(self, x, y): - """Calculate forward propagation. - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - Returns: - Tensor: Spectral convergence loss value. - Tensor: Log STFT magnitude loss value. - """ - x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window) - y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window) - sc_loss = self.spectral_convergenge_loss(x_mag, y_mag) - mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag) - - return sc_loss, mag_loss - - -class MultiResolutionSTFTLoss(torch.nn.Module): - """Multi resolution STFT loss module.""" - - def __init__(self, - fft_sizes=[1024, 2048, 512], - hop_sizes=[120, 240, 50], - win_lengths=[600, 1200, 240], - window="hann_window"): - """Initialize Multi resolution STFT loss module. 
- Args: - fft_sizes (list): List of FFT sizes. - hop_sizes (list): List of hop sizes. - win_lengths (list): List of window lengths. - window (str): Window function type. - """ - super(MultiResolutionSTFTLoss, self).__init__() - assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) - self.stft_losses = torch.nn.ModuleList() - for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths): - self.stft_losses += [STFTLoss(fs, ss, wl, window)] - - def forward(self, x, y): - """Calculate forward propagation. - Args: - x (Tensor): Predicted signal (B, T). - y (Tensor): Groundtruth signal (B, T). - Returns: - Tensor: Multi resolution spectral convergence loss value. - Tensor: Multi resolution log STFT magnitude loss value. - """ - sc_loss = 0.0 - mag_loss = 0.0 - for f in self.stft_losses: - sc_l, mag_l = f(x, y) - sc_loss += sc_l - mag_loss += mag_l - sc_loss /= len(self.stft_losses) - mag_loss /= len(self.stft_losses) - - return sc_loss, mag_loss \ No newline at end of file diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/autoencoder.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/autoencoder.py deleted file mode 100644 index cfbdceba0e1171b052f797885530bacd0f3c73d5..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/variational_autoencoder/autoencoder.py +++ /dev/null @@ -1,102 +0,0 @@ -import torch -from audioldm.latent_diffusion.ema import * -from audioldm.variational_autoencoder.modules import Encoder, Decoder -from audioldm.variational_autoencoder.distributions import DiagonalGaussianDistribution - -from audioldm.hifigan.utilities import get_vocoder, vocoder_infer - -class AutoencoderKL(nn.Module): - def __init__( - self, - ddconfig=None, - lossconfig=None, - image_key="fbank", - embed_dim=None, - time_shuffle=1, - subband=1, - ckpt_path=None, - reload_from_ckpt=None, - ignore_keys=[], - colorize_nlabels=None, - monitor=None, - base_learning_rate=1e-5, - ): - super().__init__() - - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - - self.subband = int(subband) - - if self.subband > 1: - print("Use subband decomposition %s" % self.subband) - - self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - - self.vocoder = get_vocoder(None, "cpu") - self.embed_dim = embed_dim - - if monitor is not None: - self.monitor = monitor - - self.time_shuffle = time_shuffle - self.reload_from_ckpt = reload_from_ckpt - self.reloaded = False - self.mean, self.std = None, None - - def encode(self, x): - # x = self.time_shuffle_operation(x) - x = self.freq_split_subband(x) - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - dec = self.freq_merge_subband(dec) - return dec - - def decode_to_waveform(self, dec): - dec = dec.squeeze(1).permute(0, 2, 1) - wav_reconstruction = vocoder_infer(dec, self.vocoder) - return wav_reconstruction - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - - if self.flag_first_run: - print("Latent size: ", z.size()) - self.flag_first_run = False - - dec = self.decode(z) - - return dec, posterior - - def freq_split_subband(self, fbank): - if self.subband == 1 or 
self.image_key != "stft": - return fbank - - bs, ch, tstep, fbins = fbank.size() - - assert fbank.size(-1) % self.subband == 0 - assert ch == 1 - - return ( - fbank.squeeze(1) - .reshape(bs, tstep, self.subband, fbins // self.subband) - .permute(0, 2, 1, 3) - ) - - def freq_merge_subband(self, subband_fbank): - if self.subband == 1 or self.image_key != "stft": - return subband_fbank - assert subband_fbank.size(1) == self.subband # Channel dimension - bs, sub_ch, tstep, fbins = subband_fbank.size() - return subband_fbank.permute(0, 2, 1, 3).reshape(bs, tstep, -1).unsqueeze(1) diff --git a/spaces/ALSv/FSW/roop/typing.py b/spaces/ALSv/FSW/roop/typing.py deleted file mode 100644 index 1cff7440616e20bfe7b8bc287f86d11bf1b0f083..0000000000000000000000000000000000000000 --- a/spaces/ALSv/FSW/roop/typing.py +++ /dev/null @@ -1,7 +0,0 @@ -from typing import Any - -from insightface.app.common import Face -import numpy - -Face = Face -Frame = numpy.ndarray[Any, Any] diff --git a/spaces/AQaTaHaGoD/GoD/README.md b/spaces/AQaTaHaGoD/GoD/README.md deleted file mode 100644 index 24004667a73123b1c92e9e56f564e9a73919a51a..0000000000000000000000000000000000000000 --- a/spaces/AQaTaHaGoD/GoD/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: GoD -emoji: 🌍 -colorFrom: red -colorTo: blue -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest200.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest200.py deleted file mode 100644 index 40d8f03e7f528f8c0132bd2c19515460fd47fe70..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnest200.py +++ /dev/null @@ -1,25 +0,0 @@ -# model settings -model = dict( - type='ImageClassifier', - backbone=dict( - type='ResNeSt', - depth=200, - num_stages=4, - stem_channels=128, - out_indices=(3, ), - style='pytorch'), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='LinearClsHead', - num_classes=1000, - in_channels=2048, - loss=dict( - type='LabelSmoothLoss', - label_smooth_val=0.1, - num_classes=1000, - reduction='mean', - loss_weight=1.0), - topk=(1, 5), - cal_acc=False), - train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), -) diff --git a/spaces/Abhaykoul/Youtube_video_downloader/app.py b/spaces/Abhaykoul/Youtube_video_downloader/app.py deleted file mode 100644 index 04e61cab265fa2dc31d98aec34bae22763f1ad48..0000000000000000000000000000000000000000 --- a/spaces/Abhaykoul/Youtube_video_downloader/app.py +++ /dev/null @@ -1,59 +0,0 @@ -import streamlit as st -from pytube import YouTube - -class YouTubeDownloader: - @staticmethod - def run(): - st.header("YouTube Video Downloader") - url = st.text_input("Enter YouTube URL to download:") - if url: - YouTubeDownloader.validate_url(url) - with st.expander("preview video"): - st.video(url) - if st.button("Download"): - YouTubeDownloader.cleanup() - file_ = YouTubeDownloader.download_video(url) - st.video(file_) - YouTubeDownloader.helper_message() - st.markdown("> App made by Abhay Koul ([HelpingAI on GitHub](https://github.com/HelpingAI))") - - - @staticmethod - def download_video(url): - with st.spinner("Downloading..."): - local_file = ( - YouTube(url) - .streams.filter(progressive=True, 
file_extension="mp4") - .first() - .download() - ) - st.success("Downloaded") - return local_file - - @staticmethod - def validate_url(url): - import validators - - if not validators.url(url): - st.error("Hi there 👋 URL seems invalid 👽") - st.stop() - - @classmethod - def cleanup(cls): - import pathlib - import glob - - junks = glob.glob("*.mp4") - for junk in junks: - pathlib.Path(junk).unlink() - - @classmethod - def helper_message(cls): - st.write( - "> To save the video to local computer, " - "click the vertical ... icon (aka hamburger button) in the bottom-right corner (in the video above) and click download." - ) - - -if __name__ == "__main__": - YouTubeDownloader.run() \ No newline at end of file diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/openpose/src/util.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/openpose/src/util.py deleted file mode 100644 index d8f622d7e54c7103d4cc43a0cdcae96a0b3145d5..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/openpose/src/util.py +++ /dev/null @@ -1,106 +0,0 @@ -import numpy as np -import math -import cv2 -import matplotlib -from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas -from matplotlib.figure import Figure -import numpy as np -import matplotlib.pyplot as plt -import cv2 - - -def padRightDownCorner(img, stride, padValue): - h = img.shape[0] - w = img.shape[1] - - pad = 4 * [None] - pad[0] = 0 # up - pad[1] = 0 # left - pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down - pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right - - img_padded = img - pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1)) - img_padded = np.concatenate((pad_up, img_padded), axis=0) - pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1)) - img_padded = np.concatenate((pad_left, img_padded), axis=1) - pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1)) - img_padded = np.concatenate((img_padded, pad_down), axis=0) - pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1)) - img_padded = np.concatenate((img_padded, pad_right), axis=1) - - return img_padded, pad - -# transfer caffe model to pytorch which will match the layer name - - -def transfer(model, model_weights): - transfered_model_weights = {} - for weights_name in model.state_dict().keys(): - transfered_model_weights[weights_name] = model_weights['.'.join( - weights_name.split('.')[1:])] - return transfered_model_weights - -# draw the body keypoint and lims - - -def draw_bodypose(canvas, candidate, subset, show_number=False): - stickwidth = 4 - limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], - [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], - [1, 16], [16, 18], [3, 17], [6, 18]] - - colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], - [0, 255, 85], [0, 255, 170], [0, 255, 255], [ - 0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], - [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] - for i in range(18): - for n in range(len(subset)): - index = int(subset[n][i]) - if index == -1: - continue - x, y = candidate[index][0:2] - cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1) - if show_number: - cv2.putText(canvas, f'{index}', (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, - (255, 255, 0), 1, cv2.LINE_AA) - # calc and print average - for i in range(17): - for n in range(len(subset)): - index = 
subset[n][np.array(limbSeq[i]) - 1] - if -1 in index: - continue - cur_canvas = canvas.copy() - Y = candidate[index.astype(int), 0] - X = candidate[index.astype(int), 1] - mX = np.mean(X) - mY = np.mean(Y) - length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 - angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) - polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int( - length / 2), stickwidth), int(angle), 0, 360, 1) - cv2.fillConvexPoly(cur_canvas, polygon, colors[i]) - canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0) - - return canvas - -# get max index of 2d array - - -def npmax(array): - arrayindex = array.argmax(1) - arrayvalue = array.max(1) - i = arrayvalue.argmax() - j = arrayindex[i] - return i, j - -# get max index of 2d array - - -def npmax_with_score(array): - arrayindex = array.argmax(1) - arrayvalue = array.max(1) - i = arrayvalue.argmax() - j = arrayindex[i] - score = array[i][j] - return i, j, score diff --git a/spaces/Amrrs/DragGan-Inversion/torch_utils/ops/filtered_lrelu.cpp b/spaces/Amrrs/DragGan-Inversion/torch_utils/ops/filtered_lrelu.cpp deleted file mode 100644 index ff4149b8b46b54d2f400ae10e44d19f20503ba1f..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/torch_utils/ops/filtered_lrelu.cpp +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "filtered_lrelu.h" - -//------------------------------------------------------------------------ - -static std::tuple filtered_lrelu( - torch::Tensor x, torch::Tensor fu, torch::Tensor fd, torch::Tensor b, torch::Tensor si, - int up, int down, int px0, int px1, int py0, int py1, int sx, int sy, float gain, float slope, float clamp, bool flip_filters, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. 
- TORCH_CHECK(fu.device() == x.device() && fd.device() == x.device() && b.device() == x.device(), "all input tensors must reside on the same device"); - TORCH_CHECK(fu.dtype() == torch::kFloat && fd.dtype() == torch::kFloat, "fu and fd must be float32"); - TORCH_CHECK(b.dtype() == x.dtype(), "x and b must have the same dtype"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat, "x and b must be float16 or float32"); - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK((fu.dim() == 1 || fu.dim() == 2) && (fd.dim() == 1 || fd.dim() == 2), "fu and fd must be rank 1 or 2"); - TORCH_CHECK(fu.size(0) <= INT_MAX && fu.size(-1) <= INT_MAX, "fu is too large"); - TORCH_CHECK(fd.size(0) <= INT_MAX && fd.size(-1) <= INT_MAX, "fd is too large"); - TORCH_CHECK(fu.numel() > 0, "fu is empty"); - TORCH_CHECK(fd.numel() > 0, "fd is empty"); - TORCH_CHECK(b.dim() == 1 && b.size(0) == x.size(1), "b must be a vector with the same number of channels as x"); - TORCH_CHECK(up >= 1 && down >= 1, "up and down must be at least 1"); - - // Figure out how much shared memory is available on the device. - int maxSharedBytes = 0; - AT_CUDA_CHECK(cudaDeviceGetAttribute(&maxSharedBytes, cudaDevAttrMaxSharedMemoryPerBlockOptin, x.device().index())); - int sharedKB = maxSharedBytes >> 10; - - // Populate enough launch parameters to check if a CUDA kernel exists. - filtered_lrelu_kernel_params p; - p.up = up; - p.down = down; - p.fuShape = make_int2((int)fu.size(-1), fu.dim() == 2 ? (int)fu.size(0) : 0); // shape [n, 0] indicates separable filter. - p.fdShape = make_int2((int)fd.size(-1), fd.dim() == 2 ? (int)fd.size(0) : 0); - filtered_lrelu_kernel_spec test_spec = choose_filtered_lrelu_kernel(p, sharedKB); - if (!test_spec.exec) - { - // No kernel found - return empty tensors and indicate missing kernel with return code of -1. - return std::make_tuple(torch::Tensor(), torch::Tensor(), -1); - } - - // Input/output element size. - int64_t sz = (x.dtype() == torch::kHalf) ? 2 : 4; - - // Input sizes. - int64_t xw = (int)x.size(3); - int64_t xh = (int)x.size(2); - int64_t fut_w = (int)fu.size(-1) - 1; - int64_t fut_h = (int)fu.size(0) - 1; - int64_t fdt_w = (int)fd.size(-1) - 1; - int64_t fdt_h = (int)fd.size(0) - 1; - - // Logical size of upsampled buffer. - int64_t cw = xw * up + (px0 + px1) - fut_w; - int64_t ch = xh * up + (py0 + py1) - fut_h; - TORCH_CHECK(cw > fdt_w && ch > fdt_h, "upsampled buffer must be at least the size of downsampling filter"); - TORCH_CHECK(cw <= INT_MAX && ch <= INT_MAX, "upsampled buffer is too large"); - - // Compute output size and allocate. - int64_t yw = (cw - fdt_w + (down - 1)) / down; - int64_t yh = (ch - fdt_h + (down - 1)) / down; - TORCH_CHECK(yw > 0 && yh > 0, "output must be at least 1x1"); - TORCH_CHECK(yw <= INT_MAX && yh <= INT_MAX, "output is too large"); - torch::Tensor y = torch::empty({x.size(0), x.size(1), yh, yw}, x.options(), x.suggest_memory_format()); - - // Allocate sign tensor. - torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - int64_t sw_active = 0; // Active width of sign tensor. - if (writeSigns) - { - sw_active = yw * down - (down - 1) + fdt_w; // Active width in elements. - int64_t sh = yh * down - (down - 1) + fdt_h; // Height = active height. - int64_t sw = (sw_active + 15) & ~15; // Width = active width in elements, rounded up to multiple of 16. 
- TORCH_CHECK(sh <= INT_MAX && (sw >> 2) <= INT_MAX, "signs is too large"); - s = so = torch::empty({x.size(0), x.size(1), sh, sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - else if (readSigns) - sw_active = s.size(3) << 2; - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && s.size(3) <= INT_MAX, "signs is too large"); - } - - // Populate rest of CUDA kernel parameters. - p.x = x.data_ptr(); - p.y = y.data_ptr(); - p.b = b.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.fu = fu.data_ptr(); - p.fd = fd.data_ptr(); - p.pad0 = make_int2(px0, py0); - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.flip = (flip_filters) ? 1 : 0; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.yShape = make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3), (int)s.size(2)) : make_int2(0, 0); // Width is in bytes. Contiguous. - p.sOfs = make_int2(sx, sy); - p.swLimit = (sw_active + 3) >> 2; // Rounded up to bytes. - - // x, y, b strides are in bytes. - p.xStride = make_longlong4(sz * x.stride(3), sz * x.stride(2), sz * x.stride(1), sz * x.stride(0)); - p.yStride = make_longlong4(sz * y.stride(3), sz * y.stride(2), sz * y.stride(1), sz * y.stride(0)); - p.bStride = sz * b.stride(0); - - // fu, fd strides are in elements. - p.fuStride = make_longlong3(fu.stride(-1), fu.dim() == 2 ? fu.stride(0) : 0, 0); - p.fdStride = make_longlong3(fd.stride(-1), fd.dim() == 2 ? fd.stride(0) : 0, 0); - - // Determine if indices don't fit in int32. Support negative strides although Torch currently never produces those. - bool index64b = false; - if (std::abs(p.bStride * x.size(1)) > INT_MAX) index64b = true; - if (std::min(x.size(0) * p.xStride.w, 0ll) + std::min(x.size(1) * p.xStride.z, 0ll) + std::min(x.size(2) * p.xStride.y, 0ll) + std::min(x.size(3) * p.xStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(x.size(0) * p.xStride.w, 0ll) + std::max(x.size(1) * p.xStride.z, 0ll) + std::max(x.size(2) * p.xStride.y, 0ll) + std::max(x.size(3) * p.xStride.x, 0ll) > INT_MAX) index64b = true; - if (std::min(y.size(0) * p.yStride.w, 0ll) + std::min(y.size(1) * p.yStride.z, 0ll) + std::min(y.size(2) * p.yStride.y, 0ll) + std::min(y.size(3) * p.yStride.x, 0ll) < -INT_MAX) index64b = true; - if (std::max(y.size(0) * p.yStride.w, 0ll) + std::max(y.size(1) * p.yStride.z, 0ll) + std::max(y.size(2) * p.yStride.y, 0ll) + std::max(y.size(3) * p.yStride.x, 0ll) > INT_MAX) index64b = true; - if (s.numel() > INT_MAX) index64b = true; - - // Choose CUDA kernel. - filtered_lrelu_kernel_spec spec = { 0 }; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_cuda", [&] - { - if constexpr (sizeof(scalar_t) <= 4) // Exclude doubles. constexpr prevents template instantiation. - { - // Choose kernel based on index type, datatype and sign read/write modes. 
- if (!index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if (!index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - else if ( index64b && !writeSigns && !readSigns) spec = choose_filtered_lrelu_kernel(p, sharedKB); - } - }); - TORCH_CHECK(spec.exec, "internal error - CUDA kernel not found") // This should not happen because we tested earlier that kernel exists. - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = spec.numWarps * 32; - int gx = (p.yShape.x - 1) / spec.tileOut.x + 1; - int gy = (p.yShape.y - 1) / spec.tileOut.y + 1; - int gz = p.yShape.z * p.yShape.w; - - // Repeat multiple horizontal tiles in a CTA? - if (spec.xrep) - { - p.tilesXrep = spec.xrep; - p.tilesXdim = gx; - - gx = (gx + p.tilesXrep - 1) / p.tilesXrep; - std::swap(gx, gy); - } - else - { - p.tilesXrep = 0; - p.tilesXdim = 0; - } - - // Launch filter setup kernel. - AT_CUDA_CHECK(cudaLaunchKernel(spec.setup, 1, 1024, args, 0, at::cuda::getCurrentCUDAStream())); - - // Copy kernels to constant memory. - if ( writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - else if (!writeSigns && !readSigns) AT_CUDA_CHECK((copy_filters(at::cuda::getCurrentCUDAStream()))); - - // Set cache and shared memory configurations for main kernel. - AT_CUDA_CHECK(cudaFuncSetCacheConfig(spec.exec, cudaFuncCachePreferShared)); - if (spec.dynamicSharedKB) // Need dynamically allocated shared memory? - AT_CUDA_CHECK(cudaFuncSetAttribute(spec.exec, cudaFuncAttributeMaxDynamicSharedMemorySize, spec.dynamicSharedKB << 10)); - AT_CUDA_CHECK(cudaFuncSetSharedMemConfig(spec.exec, cudaSharedMemBankSizeFourByte)); - - // Launch main kernel. - const int maxSubGz = 65535; // CUDA maximum for block z dimension. - for (int zofs=0; zofs < gz; zofs += maxSubGz) // Do multiple launches if gz is too big. - { - p.blockZofs = zofs; - int subGz = std::min(maxSubGz, gz - zofs); - AT_CUDA_CHECK(cudaLaunchKernel(spec.exec, dim3(gx, gy, subGz), bx, args, spec.dynamicSharedKB << 10, at::cuda::getCurrentCUDAStream())); - } - - // Done. - return std::make_tuple(y, so, 0); -} - -//------------------------------------------------------------------------ - -static torch::Tensor filtered_lrelu_act(torch::Tensor x, torch::Tensor si, int sx, int sy, float gain, float slope, float clamp, bool writeSigns) -{ - // Set CUDA device. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - - // Validate arguments. - TORCH_CHECK(x.dim() == 4, "x must be rank 4"); - TORCH_CHECK(x.size(0) * x.size(1) <= INT_MAX && x.size(2) <= INT_MAX && x.size(3) <= INT_MAX, "x is too large"); - TORCH_CHECK(x.numel() > 0, "x is empty"); - TORCH_CHECK(x.dtype() == torch::kHalf || x.dtype() == torch::kFloat || x.dtype() == torch::kDouble, "x must be float16, float32 or float64"); - - // Output signs if we don't have sign input. 
- torch::Tensor so; - torch::Tensor s = si; - bool readSigns = !!s.numel(); - if (writeSigns) - { - int64_t sw = x.size(3); - sw = (sw + 15) & ~15; // Round to a multiple of 16 for coalescing. - s = so = torch::empty({x.size(0), x.size(1), x.size(2), sw >> 2}, x.options().dtype(torch::kUInt8), at::MemoryFormat::Contiguous); - } - - // Validate sign tensor if in use. - if (readSigns || writeSigns) - { - TORCH_CHECK(s.is_contiguous(), "signs must be contiguous"); - TORCH_CHECK(s.dtype() == torch::kUInt8, "signs must be uint8"); - TORCH_CHECK(s.device() == x.device(), "signs must reside on the same device as x"); - TORCH_CHECK(s.dim() == 4, "signs must be rank 4"); - TORCH_CHECK(s.size(0) == x.size(0) && s.size(1) == x.size(1), "signs must have same batch & channels as x"); - TORCH_CHECK(s.size(2) <= INT_MAX && (s.size(3) << 2) <= INT_MAX, "signs tensor is too large"); - } - - // Initialize CUDA kernel parameters. - filtered_lrelu_act_kernel_params p; - p.x = x.data_ptr(); - p.s = (readSigns || writeSigns) ? s.data_ptr() : 0; - p.gain = gain; - p.slope = slope; - p.clamp = clamp; - p.xShape = make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0)); - p.xStride = make_longlong4(x.stride(3), x.stride(2), x.stride(1), x.stride(0)); - p.sShape = (readSigns || writeSigns) ? make_int2((int)s.size(3) << 2, (int)s.size(2)) : make_int2(0, 0); // Width is in elements. Contiguous. - p.sOfs = make_int2(sx, sy); - - // Choose CUDA kernel. - void* func = 0; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "filtered_lrelu_act_cuda", [&] - { - if (writeSigns) - func = choose_filtered_lrelu_act_kernel(); - else if (readSigns) - func = choose_filtered_lrelu_act_kernel(); - else - func = choose_filtered_lrelu_act_kernel(); - }); - TORCH_CHECK(func, "internal error - CUDA kernel not found"); - - // Launch CUDA kernel. - void* args[] = {&p}; - int bx = 128; // 4 warps per block. - - // Logical size of launch = writeSigns ? p.s : p.x - uint32_t gx = writeSigns ? p.sShape.x : p.xShape.x; - uint32_t gy = writeSigns ? p.sShape.y : p.xShape.y; - uint32_t gz = p.xShape.z * p.xShape.w; // Same as in p.sShape if signs are in use. - gx = (gx - 1) / bx + 1; - - // Make sure grid y and z dimensions are within CUDA launch limits. Kernel loops internally to do the rest. - const uint32_t gmax = 65535; - gy = std::min(gy, gmax); - gz = std::min(gz, gmax); - - // Launch. - AT_CUDA_CHECK(cudaLaunchKernel(func, dim3(gx, gy, gz), bx, args, 0, at::cuda::getCurrentCUDAStream())); - return so; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("filtered_lrelu", &filtered_lrelu); // The whole thing. - m.def("filtered_lrelu_act_", &filtered_lrelu_act); // Activation and sign tensor handling only. Modifies data tensor in-place. 
-} - -//------------------------------------------------------------------------ diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gn+ws/README.md b/spaces/Andy1621/uniformer_image_detection/configs/gn+ws/README.md deleted file mode 100644 index 988fb13ee43156cce75886af0e3b44eaf97a0a04..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/gn+ws/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# Weight Standardization - -## Introduction - -[ALGORITHM] - -``` -@article{weightstandardization, - author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, - title = {Weight Standardization}, - journal = {arXiv preprint arXiv:1903.10520}, - year = {2019}, -} -``` - -## Results and Models - -Faster R-CNN - -| Backbone | Style | Normalization | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -|:---------:|:-------:|:-------------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:| -| R-50-FPN | pytorch | GN+WS | 1x | 5.9 | 11.7 | 39.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130-613d9fe2.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r50_fpn_gn_ws-all_1x_coco/faster_rcnn_r50_fpn_gn_ws-all_1x_coco_20200130_210936.log.json) | -| R-101-FPN | pytorch | GN+WS | 1x | 8.9 | 9.0 | 41.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205-a93b0d75.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco/faster_rcnn_r101_fpn_gn_ws-all_1x_coco_20200205_232146.log.json) | -| X-50-32x4d-FPN | pytorch | GN+WS | 1x | 7.0 | 10.3 | 40.7 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203-839c5d9d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco_20200203_220113.log.json) | -| X-101-32x4d-FPN | pytorch | GN+WS | 1x | 10.8 | 7.6 | 42.1 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212-27da1bc2.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco/faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco_20200212_195302.log.json) | - -Mask R-CNN - -| Backbone | Style | Normalization | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -|:---------:|:-------:|:-------------:|:---------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:| -| R-50-FPN | pytorch | GN+WS | 2x | 7.3 | 10.5 | 40.6 | 36.6 | 
[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226-16acb762.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco/mask_rcnn_r50_fpn_gn_ws-all_2x_coco_20200226_062128.log.json) | -| R-101-FPN | pytorch | GN+WS | 2x | 10.3 | 8.6 | 42.0 | 37.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212-ea357cd9.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_2x_coco/mask_rcnn_r101_fpn_gn_ws-all_2x_coco_20200212_213627.log.json) | -| X-50-32x4d-FPN | pytorch | GN+WS | 2x | 8.4 | 9.3 | 41.1 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216-649fdb6f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco_20200216_201500.log.json) | -| X-101-32x4d-FPN | pytorch | GN+WS | 2x | 12.2 | 7.1 | 42.1 | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319-33fb95b5.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco_20200319_104101.log.json) | -| R-50-FPN | pytorch | GN+WS | 20-23-24e | 7.3 | - | 41.1 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213-487d1283.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco_20200213_035123.log.json) | -| R-101-FPN | pytorch | GN+WS | 20-23-24e | 10.3 | - | 43.1 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213-57b5a50f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco_20200213_130142.log.json) | -| X-50-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 8.4 | - | 42.1 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226-969bcb2c.pth) | 
[log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200226_093732.log.json) | -| X-101-32x4d-FPN | pytorch | GN+WS | 20-23-24e | 12.2 | - | 42.7 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316-e6cd35ef.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gn%2Bws/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco/mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco_20200316_013741.log.json) | - -Note: - -- GN+WS requires about 5% more memory than GN, and it is only 5% slower than GN. -- In the paper, a 20-23-24e lr schedule is used instead of 2x. -- The X-50-GN and X-101-GN pretrained models are also shared by the authors. diff --git a/spaces/Andy1621/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py deleted file mode 100644 index 104d6d43bd958d49f75d54965b326ebac29ae330..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://regnetx_12gf', - backbone=dict( - type='RegNet', - arch='regnetx_12gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[224, 448, 896, 2240], - out_channels=256, - num_outs=5)) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fcos.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fcos.py deleted file mode 100644 index 58485c1864a11a66168b7597f345ea759ce20551..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/fcos.py +++ /dev/null @@ -1,17 +0,0 @@ -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class FCOS(SingleStageDetector): - """Implementation of `FCOS `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/deeplabv3_unet_s5-d16.py b/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/deeplabv3_unet_s5-d16.py deleted file mode 100644 index 0cd262999d8b2cb8e14a5c32190ae73f479d8e81..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/deeplabv3_unet_s5-d16.py +++ /dev/null @@ -1,50 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained=None, - backbone=dict( - type='UNet', - in_channels=3, - base_channels=64, - num_stages=5, - strides=(1, 1, 1, 1, 1), - enc_num_convs=(2, 2, 2, 2, 2), - dec_num_convs=(2, 2, 2, 2), - downsamples=(True, True, True, True), - enc_dilations=(1, 1, 1, 1, 1), - dec_dilations=(1, 1, 1, 1), - with_cp=False, - conv_cfg=None, - norm_cfg=norm_cfg, - 
act_cfg=dict(type='ReLU'), - upsample_cfg=dict(type='InterpConv'), - norm_eval=False), - decode_head=dict( - type='ASPPHead', - in_channels=64, - in_index=4, - channels=16, - dilations=(1, 12, 24, 36), - dropout_ratio=0.1, - num_classes=2, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=128, - in_index=3, - channels=64, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=2, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='slide', crop_size=256, stride=170)) diff --git a/spaces/AnimalEquality/chatbot/lv_recipe_chatbot/__init__.py b/spaces/AnimalEquality/chatbot/lv_recipe_chatbot/__init__.py deleted file mode 100644 index f102a9cadfa89ce554b3b26d2b90bfba2e05273c..0000000000000000000000000000000000000000 --- a/spaces/AnimalEquality/chatbot/lv_recipe_chatbot/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "0.0.1" diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/gradio_depth2image.py b/spaces/Anonymous-sub/Rerender/ControlNet/gradio_depth2image.py deleted file mode 100644 index ee678999ae6033c18a5026bc5f6286d0364c7851..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/gradio_depth2image.py +++ /dev/null @@ -1,98 +0,0 @@ -from share import * -import config - -import cv2 -import einops -import gradio as gr -import numpy as np -import torch -import random - -from pytorch_lightning import seed_everything -from annotator.util import resize_image, HWC3 -from annotator.midas import MidasDetector -from cldm.model import create_model, load_state_dict -from cldm.ddim_hacked import DDIMSampler - - -apply_midas = MidasDetector() - -model = create_model('./models/cldm_v15.yaml').cpu() -model.load_state_dict(load_state_dict('./models/control_sd15_depth.pth', location='cuda')) -model = model.cuda() -ddim_sampler = DDIMSampler(model) - - -def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta): - with torch.no_grad(): - input_image = HWC3(input_image) - detected_map, _ = apply_midas(resize_image(input_image, detect_resolution)) - detected_map = HWC3(detected_map) - img = resize_image(input_image, image_resolution) - H, W, C = img.shape - - detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR) - - control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0 - control = torch.stack([control for _ in range(num_samples)], dim=0) - control = einops.rearrange(control, 'b h w c -> b c h w').clone() - - if seed == -1: - seed = random.randint(0, 65535) - seed_everything(seed) - - if config.save_memory: - model.low_vram_shift(is_diffusing=False) - - cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]} - un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]} - shape = (4, H // 8, W // 8) - - if config.save_memory: - model.low_vram_shift(is_diffusing=True) - - model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. 
Perhaps because 0.825**12<0.01 but 0.826**12>0.01 - samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples, - shape, cond, verbose=False, eta=eta, - unconditional_guidance_scale=scale, - unconditional_conditioning=un_cond) - - if config.save_memory: - model.low_vram_shift(is_diffusing=False) - - x_samples = model.decode_first_stage(samples) - x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8) - - results = [x_samples[i] for i in range(num_samples)] - return [detected_map] + results - - -block = gr.Blocks().queue() -with block: - with gr.Row(): - gr.Markdown("## Control Stable Diffusion with Depth Maps") - with gr.Row(): - with gr.Column(): - input_image = gr.Image(source='upload', type="numpy") - prompt = gr.Textbox(label="Prompt") - run_button = gr.Button(label="Run") - with gr.Accordion("Advanced options", open=False): - num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1) - image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64) - strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01) - guess_mode = gr.Checkbox(label='Guess Mode', value=False) - detect_resolution = gr.Slider(label="Depth Resolution", minimum=128, maximum=1024, value=384, step=1) - ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1) - scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1) - seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True) - eta = gr.Number(label="eta (DDIM)", value=0.0) - a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed') - n_prompt = gr.Textbox(label="Negative Prompt", - value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality') - with gr.Column(): - result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto') - ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta] - run_button.click(fn=process, inputs=ips, outputs=[result_gallery]) - - -block.launch(server_name='0.0.0.0') diff --git a/spaces/Ariharasudhan/YoloV5/utils/docker/Dockerfile b/spaces/Ariharasudhan/YoloV5/utils/docker/Dockerfile deleted file mode 100644 index a5035c6abc33c5e479a813b9733844cf6eab7a08..0000000000000000000000000000000000000000 --- a/spaces/Ariharasudhan/YoloV5/utils/docker/Dockerfile +++ /dev/null @@ -1,65 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 -# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference - -# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.10-py3 -RUN rm -rf /opt/pytorch # remove 1.2GB dir - -# Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ - -# Install linux packages -RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx - -# Install pip packages -COPY requirements.txt . 
-RUN python -m pip install --upgrade pip wheel
-RUN pip uninstall -y Pillow torchtext # torch torchvision
-RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \
-    'opencv-python<4.6.0.66' \
-    --extra-index-url https://download.pytorch.org/whl/cu113
-
-# Create working directory
-RUN mkdir -p /usr/src/app
-WORKDIR /usr/src/app
-
-# Copy contents
-# COPY . /usr/src/app (issues as not a .git directory)
-RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app
-
-# Set environment variables
-ENV OMP_NUM_THREADS=8
-
-
-# Usage Examples -------------------------------------------------------------------------------------------------------
-
-# Build and Push
-# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t
-
-# Pull and Run
-# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t
-
-# Pull and Run with local directory access
-# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
-
-# Kill all
-# sudo docker kill $(sudo docker ps -q)
-
-# Kill all image-based
-# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest)
-
-# DockerHub tag update
-# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew
-
-# Clean up
-# docker system prune -a --volumes
-
-# Update Ubuntu drivers
-# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/
-
-# DDP test
-# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3
-
-# GCP VM from Image
-# docker.io/ultralytics/yolov5:latest
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/variables.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/variables.py
deleted file mode 100644
index 667f2f26ff2182ecdfc5b809ba97a6cf1d1be13a..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/dotenv/variables.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import re
-from abc import ABCMeta, abstractmethod
-from typing import Iterator, Mapping, Optional, Pattern
-
-_posix_variable: Pattern[str] = re.compile(
-    r"""
-    \$\{
-        (?P<name>[^\}:]*)
-        (?::-
-            (?P<default>[^\}]*)
-        )?
-    \}
-    """,
-    re.VERBOSE,
-)
-
-
-class Atom(metaclass=ABCMeta):
-    def __ne__(self, other: object) -> bool:
-        result = self.__eq__(other)
-        if result is NotImplemented:
-            return NotImplemented
-        return not result
-
-    @abstractmethod
-    def resolve(self, env: Mapping[str, Optional[str]]) -> str: ...
- - -class Literal(Atom): - def __init__(self, value: str) -> None: - self.value = value - - def __repr__(self) -> str: - return f"Literal(value={self.value})" - - def __eq__(self, other: object) -> bool: - if not isinstance(other, self.__class__): - return NotImplemented - return self.value == other.value - - def __hash__(self) -> int: - return hash((self.__class__, self.value)) - - def resolve(self, env: Mapping[str, Optional[str]]) -> str: - return self.value - - -class Variable(Atom): - def __init__(self, name: str, default: Optional[str]) -> None: - self.name = name - self.default = default - - def __repr__(self) -> str: - return f"Variable(name={self.name}, default={self.default})" - - def __eq__(self, other: object) -> bool: - if not isinstance(other, self.__class__): - return NotImplemented - return (self.name, self.default) == (other.name, other.default) - - def __hash__(self) -> int: - return hash((self.__class__, self.name, self.default)) - - def resolve(self, env: Mapping[str, Optional[str]]) -> str: - default = self.default if self.default is not None else "" - result = env.get(self.name, default) - return result if result is not None else "" - - -def parse_variables(value: str) -> Iterator[Atom]: - cursor = 0 - - for match in _posix_variable.finditer(value): - (start, end) = match.span() - name = match["name"] - default = match["default"] - - if start > cursor: - yield Literal(value=value[cursor:start]) - - yield Variable(name=name, default=default) - cursor = end - - length = len(value) - if cursor < length: - yield Literal(value=value[cursor:length]) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/_manylinux.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/_manylinux.py deleted file mode 100644 index 4c379aa6f69ff56c8f19612002c6e3e939ea6012..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/_manylinux.py +++ /dev/null @@ -1,301 +0,0 @@ -import collections -import functools -import os -import re -import struct -import sys -import warnings -from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple - - -# Python does not provide platform information at sufficient granularity to -# identify the architecture of the running executable in some cases, so we -# determine it dynamically by reading the information from the running -# process. This only applies on Linux, which uses the ELF format. -class _ELFFileHeader: - # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - class _InvalidELFFileHeader(ValueError): - """ - An invalid ELF file header was found. - """ - - ELF_MAGIC_NUMBER = 0x7F454C46 - ELFCLASS32 = 1 - ELFCLASS64 = 2 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 - EM_386 = 3 - EM_S390 = 22 - EM_ARM = 40 - EM_X86_64 = 62 - EF_ARM_ABIMASK = 0xFF000000 - EF_ARM_ABI_VER5 = 0x05000000 - EF_ARM_ABI_FLOAT_HARD = 0x00000400 - - def __init__(self, file: IO[bytes]) -> None: - def unpack(fmt: str) -> int: - try: - data = file.read(struct.calcsize(fmt)) - result: Tuple[int, ...] 
= struct.unpack(fmt, data) - except struct.error: - raise _ELFFileHeader._InvalidELFFileHeader() - return result[0] - - self.e_ident_magic = unpack(">I") - if self.e_ident_magic != self.ELF_MAGIC_NUMBER: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_class = unpack("B") - if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_data = unpack("B") - if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: - raise _ELFFileHeader._InvalidELFFileHeader() - self.e_ident_version = unpack("B") - self.e_ident_osabi = unpack("B") - self.e_ident_abiversion = unpack("B") - self.e_ident_pad = file.read(7) - format_h = "H" - format_i = "I" - format_q = "Q" - format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q - self.e_type = unpack(format_h) - self.e_machine = unpack(format_h) - self.e_version = unpack(format_i) - self.e_entry = unpack(format_p) - self.e_phoff = unpack(format_p) - self.e_shoff = unpack(format_p) - self.e_flags = unpack(format_i) - self.e_ehsize = unpack(format_h) - self.e_phentsize = unpack(format_h) - self.e_phnum = unpack(format_h) - self.e_shentsize = unpack(format_h) - self.e_shnum = unpack(format_h) - self.e_shstrndx = unpack(format_h) - - -def _get_elf_header() -> Optional[_ELFFileHeader]: - try: - with open(sys.executable, "rb") as f: - elf_header = _ELFFileHeader(f) - except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): - return None - return elf_header - - -def _is_linux_armhf() -> bool: - # hard-float ABI can be detected from the ELF header of the running - # process - # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_ARM - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABIMASK - ) == elf_header.EF_ARM_ABI_VER5 - result &= ( - elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD - ) == elf_header.EF_ARM_ABI_FLOAT_HARD - return result - - -def _is_linux_i686() -> bool: - elf_header = _get_elf_header() - if elf_header is None: - return False - result = elf_header.e_ident_class == elf_header.ELFCLASS32 - result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB - result &= elf_header.e_machine == elf_header.EM_386 - return result - - -def _have_compatible_abi(arch: str) -> bool: - if arch == "armv7l": - return _is_linux_armhf() - if arch == "i686": - return _is_linux_i686() - return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} - - -# If glibc ever changes its major version, we need to know what the last -# minor version was, so we can build the complete list of all versions. -# For now, guess what the highest minor version might be, assume it will -# be 50 for testing. Once this actually happens, update the dictionary -# with the actual value. -_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) - - -class _GLibCVersion(NamedTuple): - major: int - minor: int - - -def _glibc_version_string_confstr() -> Optional[str]: - """ - Primary implementation of glibc_version_string using os.confstr. - """ - # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely - # to be broken or missing. This strategy is used in the standard library - # platform module. 
-    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
-    try:
-        # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
-        version_string = os.confstr("CS_GNU_LIBC_VERSION")
-        assert version_string is not None
-        _, version = version_string.split()
-    except (AssertionError, AttributeError, OSError, ValueError):
-        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
-        return None
-    return version
-
-
-def _glibc_version_string_ctypes() -> Optional[str]:
-    """
-    Fallback implementation of glibc_version_string using ctypes.
-    """
-    try:
-        import ctypes
-    except ImportError:
-        return None
-
-    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
-    # manpage says, "If filename is NULL, then the returned handle is for the
-    # main program". This way we can let the linker do the work to figure out
-    # which libc our process is actually using.
-    #
-    # We must also handle the special case where the executable is not a
-    # dynamically linked executable. This can occur when using musl libc,
-    # for example. In this situation, dlopen() will error, leading to an
-    # OSError. Interestingly, at least in the case of musl, there is no
-    # errno set on the OSError. The single string argument used to construct
-    # OSError comes from libc itself and is therefore not portable to
-    # hard code here. In any case, failure to call dlopen() means we
-    # can proceed, so we bail on our attempt.
-    try:
-        process_namespace = ctypes.CDLL(None)
-    except OSError:
-        return None
-
-    try:
-        gnu_get_libc_version = process_namespace.gnu_get_libc_version
-    except AttributeError:
-        # Symbol doesn't exist -> therefore, we are not linked to
-        # glibc.
-        return None
-
-    # Call gnu_get_libc_version, which returns a string like "2.5"
-    gnu_get_libc_version.restype = ctypes.c_char_p
-    version_str: str = gnu_get_libc_version()
-    # py2 / py3 compatibility:
-    if not isinstance(version_str, str):
-        version_str = version_str.decode("ascii")
-
-    return version_str
-
-
-def _glibc_version_string() -> Optional[str]:
-    """Returns glibc version string, or None if not using glibc."""
-    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
-
-
-def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
-    """Parse glibc version.
-
-    We use a regexp instead of str.split because we want to discard any
-    random junk that might come after the minor version -- this might happen
-    in patched/forked versions of glibc (e.g. Linaro's version of glibc
-    uses version strings like "2.20-2014.11"). See gh-3588.
-    """
-    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
-    if not m:
-        warnings.warn(
-            "Expected glibc version with 2 components major.minor,"
-            " got: %s" % version_str,
-            RuntimeWarning,
-        )
-        return -1, -1
-    return int(m.group("major")), int(m.group("minor"))
-
-
-@functools.lru_cache()
-def _get_glibc_version() -> Tuple[int, int]:
-    version_str = _glibc_version_string()
-    if version_str is None:
-        return (-1, -1)
-    return _parse_glibc_version(version_str)
-
-
-# From PEP 513, PEP 600
-def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
-    sys_glibc = _get_glibc_version()
-    if sys_glibc < version:
-        return False
-    # Check for presence of _manylinux module.
- try: - import _manylinux # noqa - except ImportError: - return True - if hasattr(_manylinux, "manylinux_compatible"): - result = _manylinux.manylinux_compatible(version[0], version[1], arch) - if result is not None: - return bool(result) - return True - if version == _GLibCVersion(2, 5): - if hasattr(_manylinux, "manylinux1_compatible"): - return bool(_manylinux.manylinux1_compatible) - if version == _GLibCVersion(2, 12): - if hasattr(_manylinux, "manylinux2010_compatible"): - return bool(_manylinux.manylinux2010_compatible) - if version == _GLibCVersion(2, 17): - if hasattr(_manylinux, "manylinux2014_compatible"): - return bool(_manylinux.manylinux2014_compatible) - return True - - -_LEGACY_MANYLINUX_MAP = { - # CentOS 7 w/ glibc 2.17 (PEP 599) - (2, 17): "manylinux2014", - # CentOS 6 w/ glibc 2.12 (PEP 571) - (2, 12): "manylinux2010", - # CentOS 5 w/ glibc 2.5 (PEP 513) - (2, 5): "manylinux1", -} - - -def platform_tags(linux: str, arch: str) -> Iterator[str]: - if not _have_compatible_abi(arch): - return - # Oldest glibc to be supported regardless of architecture is (2, 17). - too_old_glibc2 = _GLibCVersion(2, 16) - if arch in {"x86_64", "i686"}: - # On x86/i686 also oldest glibc to be supported is (2, 5). - too_old_glibc2 = _GLibCVersion(2, 4) - current_glibc = _GLibCVersion(*_get_glibc_version()) - glibc_max_list = [current_glibc] - # We can assume compatibility across glibc major versions. - # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 - # - # Build a list of maximum glibc versions so that we can - # output the canonical list of all glibc from current_glibc - # down to too_old_glibc2, including all intermediary versions. - for glibc_major in range(current_glibc.major - 1, 1, -1): - glibc_minor = _LAST_GLIBC_MINOR[glibc_major] - glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) - for glibc_max in glibc_max_list: - if glibc_max.major == too_old_glibc2.major: - min_minor = too_old_glibc2.minor - else: - # For other glibc major versions oldest supported is (x, 0). - min_minor = -1 - for glibc_minor in range(glibc_max.minor, min_minor, -1): - glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) - tag = "manylinux_{}_{}".format(*glibc_version) - if _is_compatible(tag, arch, glibc_version): - yield linux.replace("linux", tag) - # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. 
- if glibc_version in _LEGACY_MANYLINUX_MAP: - legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] - if _is_compatible(legacy_tag, arch, glibc_version): - yield linux.replace("linux", legacy_tag) diff --git a/spaces/Audio-AGI/AudioSep/models/CLAP/training/logger.py b/spaces/Audio-AGI/AudioSep/models/CLAP/training/logger.py deleted file mode 100644 index ac4634970fae6aacde2b7b808355dbd50c90ce73..0000000000000000000000000000000000000000 --- a/spaces/Audio-AGI/AudioSep/models/CLAP/training/logger.py +++ /dev/null @@ -1,30 +0,0 @@ -import logging - - -def setup_logging(log_file, level, include_host=False): - if include_host: - import socket - - hostname = socket.gethostname() - formatter = logging.Formatter( - f"%(asctime)s | {hostname} | %(levelname)s | %(message)s", - datefmt="%Y-%m-%d,%H:%M:%S", - ) - else: - formatter = logging.Formatter( - "%(asctime)s | %(levelname)s | %(message)s", datefmt="%Y-%m-%d,%H:%M:%S" - ) - - logging.root.setLevel(level) - loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict] - for logger in loggers: - logger.setLevel(level) - - stream_handler = logging.StreamHandler() - stream_handler.setFormatter(formatter) - logging.root.addHandler(stream_handler) - - if log_file: - file_handler = logging.FileHandler(filename=log_file) - file_handler.setFormatter(formatter) - logging.root.addHandler(file_handler) diff --git a/spaces/Audio-AGI/WavJourney/scripts/download_models.py b/spaces/Audio-AGI/WavJourney/scripts/download_models.py deleted file mode 100644 index 51dda13bea02b8453cf7958e34cc9571c208c0df..0000000000000000000000000000000000000000 --- a/spaces/Audio-AGI/WavJourney/scripts/download_models.py +++ /dev/null @@ -1,32 +0,0 @@ -import yaml -import os - -# Read the YAML file -with open('config.yaml', 'r') as file: - config = yaml.safe_load(file) - -# Extract values for each application -ttm_model_size = config['AudioCraft']['ttm_model_size'] -tta_model_size = config['AudioCraft']['tta_model_size'] - -# Download nltk -import nltk -nltk.download('punkt') - -# Downloading the TTS models -print('Step 1: Downloading TTS model ...') -os.system(f'conda run --live-stream -n WavJourney python -c \'from transformers import BarkModel; BarkModel.from_pretrained("suno/bark")\'') - -print('Step 2: Downloading TTA model ...') -os.system(f'conda run --live-stream -n WavJourney python -c \'from audiocraft.models import AudioGen; tta_model = AudioGen.get_pretrained("facebook/audiogen-{tta_model_size}")\'') - -print('Step 3: Downloading TTM model ...') -os.system(f'conda run --live-stream -n WavJourney python -c \'from audiocraft.models import MusicGen; tta_model = MusicGen.get_pretrained("facebook/musicgen-{ttm_model_size}")\'') - -print('Step 4: Downloading SR model ...') -os.system(f'conda run --live-stream -n WavJourney python -c \'from voicefixer import VoiceFixer; vf = VoiceFixer()\'') - -print('Step 5: Downloading VP model ...') -os.system(f'conda run --live-stream -n WavJourney python -c \'from VoiceParser.model import VoiceParser; vp = VoiceParser(device="cpu")\'') - -print('All models successfully downloaded!') diff --git a/spaces/Benson/text-generation/Examples/Cmo Conseguir Sobre l Mod Apk Descarga Apkpure.md b/spaces/Benson/text-generation/Examples/Cmo Conseguir Sobre l Mod Apk Descarga Apkpure.md deleted file mode 100644 index e4f30667bf4995215f6a691be192ad54a16ec721..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cmo Conseguir Sobre l Mod Apk Descarga Apkpure.md +++ /dev/null @@ -1,33 +0,0 @@ - -

Getting Over It Mod APK Download from Apkpure: How to Climb a Mountain with a Hammer and a Pot

-

If you are looking for a game that will test your patience, skill, and sanity, you may want to try Getting Over It with Bennett Foddy. This is a game that will make you rage, laugh, cry, and maybe even learn something about yourself. And if you want to enjoy this game without paying for it or dealing with ads, you may want to download the mod APK from Apkpure. In this article, we will tell you everything you need to know about this game, why you should download the mod APK from Apkpure, how to install it on your Android device, and how to play it.

-

What is Getting Over It with Bennett Foddy?

-

Getting Over It with Bennett Foddy is a game created by Bennett Foddy, a game designer and philosopher known for making games that are intentionally frustrating and difficult. Some of his other games include QWOP, GIRP, and CLOP. Getting Over It with Bennett Foddy was released in 2017 for Windows, macOS, iOS, and Android.

-

getting over it mod apk download apkpure


Download Zip --->>> https://bltlly.com/2v6J2K



-

A punishing climbing game with a twist

-

The premise of the game is simple: you are a man named Diogenes who is stuck in a metal pot with only a hammer. Your goal is to climb a huge mountain made of various objects, such as rocks, trees, furniture, pipes, cars, and so on. The twist is that there is no save system, no checkpoints, no undo button. If you make a mistake or lose your grip, you can fall all the way back to the bottom and lose all your progress. And trust us, you will fall. A lot.

-

A homage to Jazzuo's 2002 B-game classic 'Sexy Hiking'

- -

A philosophical commentary on frustration and perseverance

-

As you play, you will hear Bennett Foddy's voice narrating your journey. He will comment on your actions, give you some history and trivia about the game and its influences, share quotes and anecdotes from famous people who faced adversity and failure, and sometimes mock or encourage you. His voice is calm and soothing, but also sarcastic and ironic. He will make you wonder why you are playing this game, what it means to fail and to succeed, and how to deal with frustration and keep persevering.

-

Why download the mod APK from Apkpure?

-

If you are interested in playing this game on your Android device, you have two options: you can buy it from the Google Play Store, or download the mod APK from Apkpure for free. As for the climb itself, it only gets harder and harder. You will encounter slippery, unstable, sharp, or moving objects. You will also face gaps, cliffs, ledges, and dead ends. Some of the obstacles are designed to trick or troll you, such as fake paths, hidden traps, or sudden changes. You will also have to deal with wind, rain, and darkness. The game is divided into several sections, each with its own theme and difficulty level. Some of the sections are named after famous mountains, such as Annapurna, Everest, or Olympus Mons.

-

Tips and tricks for beating the game

-

The game is very hard and frustrating, but not impossible. Here are some tips and tricks that can help you get through it:

- -

Getting Over It with Bennett Foddy is a unique and challenging game that will put you through a whole range of emotions and thoughts. It is a game that will test your patience, skill, and sanity. It is a game that will make you rage, laugh, cry, and maybe even learn something about yourself. And if you want to play this game on your Android device for free and without ads, you can download the mod APK from Apkpure. We hope this article has helped you understand what this game is about, why you should download the mod APK from Apkpure, how to install it on your device, and how to play it.

-

Frequently asked questions

-

Here are some frequently asked questions about Getting Over It with Bennett Foddy:

-

-

Q: How long does it take to beat the game?

-

A: It depends on your skill level and luck. Some people can beat the game in less than an hour, while others may take days or weeks. The world speedrun record for the game is 1 minute and 56 seconds.

-

Q: What happens when you reach the top of the mountain?

-

A: We won't spoil it for you, but let's just say there is a surprise waiting for you at the end of the game.

-

Q: Who is Diogenes?

-

A: Diogenes was a Greek philosopher who lived in the 4th century BC. He was known for his unconventional, ascetic lifestyle. He rejected material possessions and social norms, and lived in a large ceramic jar in Athens. He was also known for his witty and sarcastic remarks.

-

Q: Who is Jazzuo?

-

A: Jazzuo is a Czech indie game developer who made Sexy Hiking in 2002.

-

Q: Who is Bennett Foddy?

-

A: Bennett Foddy is an Australian-American game designer and philosopher who made Getting Over It with Bennett Foddy in 2017.

-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Archivo Pdf De 1 Mb.md b/spaces/Benson/text-generation/Examples/Descargar Archivo Pdf De 1 Mb.md deleted file mode 100644 index 500492c36a0fc79e47d4902a939ac1202b1928cf..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Archivo Pdf De 1 Mb.md +++ /dev/null @@ -1,159 +0,0 @@ - -

How to Download a 1 MB PDF File in Minutes

-

PDF files are one of the most popular and versatile document formats in the digital world. They can contain text, images, graphics, links, forms, annotations, and more. They can also preserve the layout and appearance of your document across different devices and platforms.

-

But sometimes, you may need to download a small PDF file of only 1 MB or less. Maybe you have limited bandwidth or storage space on your device. Maybe you want to save time and data when downloading a document. Maybe you need to send or receive a document through an email or messaging app that has a file size limit.

-

download 1 mb pdf file


DOWNLOAD ———>>> https://bltlly.com/2v6Jgs



-

Whatever your reason, downloading a 1 MB PDF file is not as hard as you might think. In this article, we will show you how to download a 1 MB PDF file from the Internet, how to compress a larger PDF file to 1 MB or less, and how to open and view a 1 MB PDF file on your device.

-

What is a PDF file?

-

PDF stands for Portable Document Format. It is a file format created by Adobe in 1993 to let users share and print documents without losing the original formatting. PDF files can be opened by various programs and applications, such as Adobe Acrobat Reader, Google Chrome, Microsoft Edge, and more.

-

Some of the benefits of PDF files over other formats are:

-
    -
• They are compatible with different operating systems and devices
  • -
• They can protect the content and integrity of your document with encryption and passwords
  • -
• They can compress large amounts of data without compromising quality
  • -
• They can support interactive features such as hyperlinks, bookmarks, annotations, and forms
  • -
• They can easily be converted to or from other formats such as Word, Excel, PowerPoint, JPG, PNG, and more
  • -
-

Why would you need to download a 1 MB PDF file?

- -
    -
• You want to download a short article, report, brochure, flyer, or resume that is available online as a PDF file
  • -
• You want to download a sample or preview of a longer document or book that is offered as a PDF file
  • -
  • Desea descargar un formulario o solicitud que necesita llenar y enviar como archivo PDF
  • -
  • Desea descargar un certificado, recibo, factura o ticket que se emite como un archivo PDF
  • -
  • Desea descargar un cupón, vale o código de descuento que se proporciona como un archivo PDF
  • -
-

Sin embargo, descargar un pequeño archivo PDF no siempre es fácil o conveniente. A veces, puede encontrar algunos desafíos o limitaciones al intentar descargar un archivo PDF grande. Por ejemplo:

-
    -
  • Tiene una conexión a Internet lenta o inestable que hace que la descarga de archivos grandes tome demasiado tiempo o falle
  • -
  • Tiene un plan de datos limitado o cuota que hace que la descarga de archivos grandes consuma demasiados datos o incurra en cargos adicionales
  • -
  • Tiene un espacio de almacenamiento o memoria bajo en su dispositivo que hace imposible la descarga de archivos grandes o causa errores
  • -
  • Tienes un firewall o antivirus estricto que bloquea o restringe la descarga de archivos grandes de fuentes desconocidas
  • -
  • Tiene un límite de tamaño de archivo o restricción en su aplicación de correo electrónico o mensajería que le impide enviar o recibir archivos grandes como archivos adjuntos
  • -
-

How to download a 1 MB PDF file from the Internet

If you need to download a 1 MB PDF file from the Internet, you first have to find and access one online. Many sources and websites offer free or low-cost PDF files on a wide range of topics. Some of them are:

• PDF Drive: a free online library with more than 90 million PDF files available for free download
• PDF Books World: a free online platform with thousands of PDF books available for free download
• PDF Zone: a free online resource with dozens of PDF guides and tutorials available for free download
• PDF Archive: a free online archive with millions of PDF documents available for free download

Once you find a 1 MB PDF file you want, you need to download it and save it to your device. The exact steps vary by source or website, but in general they are (a scripted alternative is sketched after these steps):

1. Click the link or icon of the PDF file to open it in your browser or software
2. Look for a download button or option on the page or toolbar
3. Click the download button or option and choose a location or folder on your device where you want to save the PDF file
4. Wait for the download to complete and check that the PDF file was saved correctly on your device
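For readers who prefer the command line, the manual steps above can be automated. The following is a minimal sketch in Python using the requests library; the URL and output path are placeholders, not real endpoints.

import requests

# Hypothetical URL -- replace with the actual link to the PDF you found.
PDF_URL = "https://example.com/sample.pdf"
OUTPUT_PATH = "sample.pdf"
ONE_MB = 1024 * 1024

# Stream the response so the file never sits fully in memory.
response = requests.get(PDF_URL, stream=True, timeout=30)
response.raise_for_status()

size = 0
with open(OUTPUT_PATH, "wb") as f:
    for chunk in response.iter_content(chunk_size=8192):
        f.write(chunk)
        size += len(chunk)

print(f"Saved {OUTPUT_PATH}: {size / ONE_MB:.2f} MB")
if size > ONE_MB:
    print("Note: the file is larger than 1 MB; consider compressing it.")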

If you run into problems or errors while downloading a 1 MB PDF file, you can try some of these solutions:

• Refresh the page or reload the PDF file
• Check your Internet connection and its speed
• Clear your browser's cache and cookies
• Disable or adjust your firewall or antivirus settings
• Use a different browser or program
• Contact the source or website for support or feedback

How to compress a larger PDF file to 1 MB or less

Sometimes you may not find a 1 MB PDF file that suits your needs. You may instead have a larger PDF file that exceeds your bandwidth, storage, or file size limit. In that case, you can compress the larger PDF file down to a smaller size.

Compressing a PDF file means reducing its file size by removing or optimizing some of its elements, such as images, fonts, and metadata. Compressing a PDF file can save you time, data, and space when downloading, uploading, sending, or storing it.

Many free online tools and services can compress PDF files for you. Some of them are:

• Smallpdf: a free online tool that can compress PDF files by up to 80% without losing quality
• iLovePDF: a free online tool that can compress PDF files by up to 70% without losing quality
• PDF Compressor: a free online tool that can compress PDF files by up to 90% without losing quality
• PDF2Go: a free online tool that can compress PDF files by up to 50% without losing quality
• Soda PDF: a free online tool that can compress PDF files by up to 75% without losing quality

To compress your PDF file with one of these tools or services, follow these steps (an offline alternative is sketched after this list):

1. Go to the website or app of the tool or service you want to use
2. Upload your PDF file from your device or cloud storage
3. Select the compression level or quality you want for your PDF file
4. Wait for the tool or service to compress your PDF file
5. Download the compressed PDF file and save it to your device or cloud storage
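If you would rather not upload your document to a third-party service, compression can be done offline. The sketch below drives Ghostscript from Python; it assumes the gs binary is installed and on your PATH, and the /ebook preset is only one reasonable starting point.

import subprocess

def compress_pdf(src: str, dst: str, preset: str = "/ebook") -> None:
    """Rewrite src as dst using Ghostscript's pdfwrite device.

    Common presets, from lighter to heavier compression:
    /prepress, /printer, /ebook, /screen.
    """
    subprocess.run(
        [
            "gs",
            "-sDEVICE=pdfwrite",
            "-dCompatibilityLevel=1.4",
            f"-dPDFSETTINGS={preset}",
            "-dNOPAUSE", "-dQUIET", "-dBATCH",
            f"-sOutputFile={dst}",
            src,  # hypothetical input file
        ],
        check=True,
    )

compress_pdf("large.pdf", "small.pdf")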

If you run into problems or errors while compressing your PDF file, you can try some of these solutions:

• Check the file size and format of your PDF file
• Check the compression level and quality you selected
• Check the compatibility and security of the tool or service you use
• Try a different tool or service
• Contact the tool or service provider for support or feedback

How to open and view a 1 MB PDF file on your device

After downloading or compressing a 1 MB PDF file, you need to open and view it on your device. Various programs and apps can open and view PDF files. Some of them are:

• Adobe Acrobat Reader: free software that can open and view PDF files on Windows, Mac, Android, and iOS devices
• Microsoft Edge: a free web browser that can open and view PDF files on Windows, Mac, Android, and iOS devices
• Foxit Reader: free software that can open and view PDF files on Windows, Mac, Linux, Android, and iOS devices
• PDF Viewer: a free app that can open and view PDF files on Android and iOS devices

To open and view a 1 MB PDF file on your device, follow these steps (a quick scripted integrity check is sketched after this list):

1. Install or update the software or app you want to use on your device
2. Locate the 1 MB PDF file on your device or in your cloud storage
3. Open the 1 MB PDF file with the software or app of your choice
4. View the 1 MB PDF file on your device's screen
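Before opening a file in a viewer, it can be useful to confirm that the download is actually a valid PDF. The sketch below uses the third-party pypdf library (installable with pip install pypdf) to read the page count and metadata; the file name is a placeholder.

from pypdf import PdfReader

reader = PdfReader("sample.pdf")  # hypothetical file name

# A readable page tree is a good sign the download was not corrupted.
print(f"Pages: {len(reader.pages)}")

meta = reader.metadata
if meta is not None:
    print(f"Title:  {meta.title}")
    print(f"Author: {meta.author}")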

To adjust the settings or preferences of your software or app and get the best viewing experience, you can try some of these options:

• Zoom in or out to change the size of the PDF file on your screen
• Rotate or flip the PDF file to change its orientation on your screen
• Search for a word or phrase in the PDF file
• Highlight or annotate a part of the PDF file
• Print the PDF file or share it with others

Conclusion

Downloading a 1 MB PDF file is not a difficult task once you know how. In this article, we have shown you how to download a 1 MB PDF file from the Internet, how to compress a larger PDF file down to 1 MB or less, and how to open and view a 1 MB PDF file on your device. We hope this article has taught you something new and useful.

Here are some final tips on downloading, compressing, and viewing PDF files efficiently:

• Choose a reliable, trustworthy source or website that offers free or low-cost PDF files
• Check the size and format of the PDF file before downloading or compressing it
• Compress the PDF file only when necessary, and without compromising its quality or content
• Open and view your PDF file with the software or app that best fits your device and preferences

If you have any questions or comments about downloading, compressing, or viewing PDF files, feel free to leave them below. We would love to hear from you!

Frequently asked questions

What is the difference between a PDF file and a Word file?

A PDF file is a document format that preserves the layout and appearance of your document across different devices and platforms. A Word file is a document format that lets you edit and format your document with a wide range of features and options.

How can I convert a PDF file to a Word file, or vice versa?

You can use various tools and services that convert between PDF and Word files online for free (a local scripted alternative is sketched after this list). Some of them are:

• PDF to Word Converter: a free online tool that can convert PDF files to Word files in seconds
• Word to PDF Converter: a free online tool that can convert Word files to PDF files in seconds
• PDFelement: free software that can convert PDF files to Word files and back on Windows, Mac, Android, and iOS devices
• WPS Office: free software that can convert PDF files to Word files and back on Windows, Mac, Linux, Android, and iOS devices
• Zamzar: a free online service that can convert PDF files to Word files and back by email
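Conversion can also be scripted locally. The sketch below uses the third-party pdf2docx package (pip install pdf2docx) for the PDF-to-Word direction; the file names are placeholders, and the fidelity of the result depends heavily on the layout of the source PDF.

from pdf2docx import Converter

cv = Converter("report.pdf")   # hypothetical input file
cv.convert("report.docx")     # write the Word version
cv.close()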

How can I edit a PDF file?

You can use various tools and services that edit PDF files online for free. Some of them are:

• PDFescape: a free online tool for editing text, images, links, forms, and more in PDF files
• PDF-XChange Editor: free software for editing text, images, comments, stamps, and more in PDF files on Windows devices
• PDF Expert: free software for editing text, images, links, forms, and more in PDF files on Mac devices
• Xodo: a free app for editing text, images, annotations, bookmarks, and more in PDF files on Android and iOS devices

How can I merge or split a PDF file?

You can use various tools and services that merge or split PDF files online for free (a scripted alternative is sketched after this list). Some of them are:

• PDF Merge: a free online tool that combines several PDF files into one
• PDF Splitter: a free online tool that splits one PDF file into several
• PDF SAM: a free online tool that merges or splits PDF files via drag and drop
• PDFill: free software that merges or splits PDF files with various options on Windows devices
• PDFsam Basic: free software that merges or splits PDF files with various options on Windows, Mac, and Linux devices
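Merging and splitting are also straightforward to script. The sketch below again uses the pypdf library; the input file names are placeholders.

from pypdf import PdfReader, PdfWriter

# Merge: append every page of both inputs into one output.
merged = PdfWriter()
for name in ("part1.pdf", "part2.pdf"):
    for page in PdfReader(name).pages:
        merged.add_page(page)
with open("merged.pdf", "wb") as f:
    merged.write(f)

# Split: write each page of the merged file to its own PDF.
reader = PdfReader("merged.pdf")
for i, page in enumerate(reader.pages, start=1):
    writer = PdfWriter()
    writer.add_page(page)
    with open(f"page_{i}.pdf", "wb") as f:
        writer.write(f)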

How can I sign a PDF file?

You can use various tools and services that sign PDF files online for free. Some of them are:

• DocuSign: a free online service for signing PDF files with your electronic signature or digital certificate
• HelloSign: a free online service for signing PDF files with your electronic signature or digital certificate
• Adobe Sign: a free online service for signing PDF files with your electronic signature or digital certificate
• SignNow: a free app for signing PDF files with your electronic signature or digital certificate on Android and iOS devices

\ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/__about__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/__about__.py deleted file mode 100644 index 3551bc2d29846441299cf57b397b02fc164c99b9..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/packaging/__about__.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -__all__ = [ - "__title__", - "__summary__", - "__uri__", - "__version__", - "__author__", - "__email__", - "__license__", - "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "21.3" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD-2-Clause or Apache-2.0" -__copyright__ = "2014-2019 %s" % __author__ diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/installer.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/installer.py deleted file mode 100644 index b7096df14b4a15980ad138a3990d3e25aeb3bfe1..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/installer.py +++ /dev/null @@ -1,104 +0,0 @@ -import glob -import os -import subprocess -import sys -import tempfile -import warnings -from distutils import log -from distutils.errors import DistutilsError - -import pkg_resources -from setuptools.wheel import Wheel -from ._deprecation_warning import SetuptoolsDeprecationWarning - - -def _fixup_find_links(find_links): - """Ensure find-links option end-up being a list of strings.""" - if isinstance(find_links, str): - return find_links.split() - assert isinstance(find_links, (tuple, list)) - return find_links - - -def fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME - """Fetch an egg needed for building. - - Use pip/wheel to fetch/build a wheel.""" - warnings.warn( - "setuptools.installer is deprecated. Requirements should " - "be satisfied by a PEP 517 installer.", - SetuptoolsDeprecationWarning, - ) - # Warn if wheel is not available - try: - pkg_resources.get_distribution('wheel') - except pkg_resources.DistributionNotFound: - dist.announce('WARNING: The wheel package is not available.', log.WARN) - # Ignore environment markers; if supplied, it is required. - req = strip_marker(req) - # Take easy_install options into account, but do not override relevant - # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll - # take precedence. 
- opts = dist.get_option_dict('easy_install') - if 'allow_hosts' in opts: - raise DistutilsError('the `allow-hosts` option is not supported ' - 'when using pip to install requirements.') - quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ - if 'PIP_INDEX_URL' in os.environ: - index_url = None - elif 'index_url' in opts: - index_url = opts['index_url'][1] - else: - index_url = None - find_links = ( - _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts - else [] - ) - if dist.dependency_links: - find_links.extend(dist.dependency_links) - eggs_dir = os.path.realpath(dist.get_egg_cache_dir()) - environment = pkg_resources.Environment() - for egg_dist in pkg_resources.find_distributions(eggs_dir): - if egg_dist in req and environment.can_add(egg_dist): - return egg_dist - with tempfile.TemporaryDirectory() as tmpdir: - cmd = [ - sys.executable, '-m', 'pip', - '--disable-pip-version-check', - 'wheel', '--no-deps', - '-w', tmpdir, - ] - if quiet: - cmd.append('--quiet') - if index_url is not None: - cmd.extend(('--index-url', index_url)) - for link in find_links or []: - cmd.extend(('--find-links', link)) - # If requirement is a PEP 508 direct URL, directly pass - # the URL to pip, as `req @ url` does not work on the - # command line. - cmd.append(req.url or str(req)) - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError as e: - raise DistutilsError(str(e)) from e - wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0]) - dist_location = os.path.join(eggs_dir, wheel.egg_name()) - wheel.install_as_egg(dist_location) - dist_metadata = pkg_resources.PathMetadata( - dist_location, os.path.join(dist_location, 'EGG-INFO')) - dist = pkg_resources.Distribution.from_filename( - dist_location, metadata=dist_metadata) - return dist - - -def strip_marker(req): - """ - Return a new requirement without the environment marker to avoid - calling pip with something like `babel; extra == "i18n"`, which - would always be ignored. - """ - # create a copy to avoid mutating the input - req = pkg_resources.Requirement.parse(str(req)) - req.marker = None - return req diff --git a/spaces/Blealtan/clip-guided-binary-autoencoder/README.md b/spaces/Blealtan/clip-guided-binary-autoencoder/README.md deleted file mode 100644 index aba547ce0b9ef7cca7081a544f8547d0d212d424..0000000000000000000000000000000000000000 --- a/spaces/Blealtan/clip-guided-binary-autoencoder/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Clip Guided Binary Autoencoder -emoji: 🌍 -colorFrom: pink -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/Makefile b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/Makefile deleted file mode 100644 index d537643dd411736a5f309383cfef52ea7d5e4599..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -# Minimal makefile for Sphinx documentation -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". 
-help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/spaces/CVPR/LIVE/pybind11/pybind11/_version.py b/spaces/CVPR/LIVE/pybind11/pybind11/_version.py deleted file mode 100644 index 1f2f254ce5e262fa7fb4770e1b770935ea46ecc0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/pybind11/_version.py +++ /dev/null @@ -1,3 +0,0 @@ -# -*- coding: utf-8 -*- -version_info = (2, 5, 'dev1') -__version__ = '.'.join(map(str, version_info)) diff --git a/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/compound_assignment_operators.h b/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/compound_assignment_operators.h deleted file mode 100644 index 737d6abd098e0acc666ec9678e3219d8c9586cca..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/detail/functional/operators/compound_assignment_operators.h +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include -#include -#include - -namespace thrust -{ -namespace detail -{ -namespace functional -{ - -// there's no standard plus_equal functional, so roll an ad hoc one here -struct plus_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) += THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) += THRUST_FWD(t2)) - { - return THRUST_FWD(t1) += THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator+=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator+=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator+=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator+=() - -// there's no standard minus_equal functional, so roll an ad hoc one here -struct minus_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) -= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) -= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) -= THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator-=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator-=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator-=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator-=() - -// there's no standard multiplies_equal functional, so roll an ad hoc one here -struct multiplies_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) *= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) *= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) *= THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator*=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator*=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator*=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator*=() - -// there's no standard divides_equal functional, so roll an ad hoc one here -struct divides_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) /= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) /= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) /= THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> 
-operator/=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator/=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator/=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator/=() - -// there's no standard modulus_equal functional, so roll an ad hoc one here -struct modulus_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) %= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) %= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) %= THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator%=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator%=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator%=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator%=() - -// there's no standard bit_and_equal functional, so roll an ad hoc one here -struct bit_and_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) &= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) &= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) &= THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator&=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator&=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator&=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator&=() - -// there's no standard bit_or_equal functional, so roll an ad hoc one here -struct bit_or_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) |= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) |= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) |= THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator|=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator|=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator|=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator|=() - -// there's no standard bit_xor_equal functional, so roll an ad hoc one here -struct bit_xor_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - 
noexcept(noexcept(THRUST_FWD(t1) ^= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) ^= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) ^= THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator^=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator|=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator^=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator|=() - -// there's no standard bit_lshift_equal functional, so roll an ad hoc one here -struct bit_lshift_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) <<= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) <<= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) <<= THRUST_FWD(t2); - } -}; -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator<<=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator<<=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator<<=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator<<=() - -// there's no standard bit_rshift_equal functional, so roll an ad hoc one here -struct bit_rshift_equal -{ - using is_transparent = void; - - __thrust_exec_check_disable__ - template - __host__ __device__ - constexpr auto operator()(T1&& t1, T2&& t2) const - noexcept(noexcept(THRUST_FWD(t1) >>= THRUST_FWD(t2))) - -> decltype(THRUST_FWD(t1) >>= THRUST_FWD(t2)) - { - return THRUST_FWD(t1) >>= THRUST_FWD(t2); - } -}; - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - typename as_actor::type - > -> -operator>>=(const actor &_1, const T2 &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator>>=() - -template -__host__ __device__ -actor< - composite< - transparent_binary_operator, - actor, - actor - > -> -operator>>=(const actor &_1, const actor &_2) -{ - return compose(transparent_binary_operator(), - make_actor(_1), - make_actor(_2)); -} // end operator>>=() - -} // end functional -} // end detail -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/sequence.h b/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/sequence.h deleted file mode 100644 index c6ae90664ad9538e73febfde86c334011de417c8..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/sequence.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// this system has no special version of this algorithm - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/partition.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/partition.h deleted file mode 100644 index 64a76e2788725134cf456742f33d57714e0f071f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/partition.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file reduce.h - * \brief OpenMP implementation of reduce algorithms. - */ - -#pragma once - -#include -#include -#include - -namespace thrust -{ -namespace system -{ -namespace omp -{ -namespace detail -{ - - -template - ForwardIterator stable_partition(execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - Predicate pred); - -template - ForwardIterator stable_partition(execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - InputIterator stencil, - Predicate pred); - -template - thrust::pair - stable_partition_copy(execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator1 out_true, - OutputIterator2 out_false, - Predicate pred); - -template - thrust::pair - stable_partition_copy(execution_policy &exec, - InputIterator1 first, - InputIterator1 last, - InputIterator2 stencil, - OutputIterator1 out_true, - OutputIterator2 out_false, - Predicate pred); - - -} // end namespace detail -} // end namespace omp -} // end namespace system -} // end namespace thrust - -#include - diff --git a/spaces/CVPR/Text2Human/Text2Human/utils/util.py b/spaces/CVPR/Text2Human/Text2Human/utils/util.py deleted file mode 100644 index f51663ff61a7ebb0b6c3b34633dcf417039fb762..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Text2Human/Text2Human/utils/util.py +++ /dev/null @@ -1,123 +0,0 @@ -import logging -import os -import random -import sys -import time -from shutil import get_terminal_size - -import numpy as np -import torch - -logger = logging.getLogger('base') - - -def make_exp_dirs(opt): - """Make dirs for experiments.""" - path_opt = opt['path'].copy() - if opt['is_train']: - overwrite = True if 'debug' in opt['name'] else False - os.makedirs(path_opt.pop('experiments_root'), exist_ok=overwrite) - os.makedirs(path_opt.pop('models'), exist_ok=overwrite) - else: - os.makedirs(path_opt.pop('results_root')) - - -def set_random_seed(seed): - """Set random seeds.""" - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - 
torch.cuda.manual_seed_all(seed) - - -class ProgressBar(object): - """A progress bar which can print the progress. - - Modified from: - https://github.com/hellock/cvbase/blob/master/cvbase/progress.py - """ - - def __init__(self, task_num=0, bar_width=50, start=True): - self.task_num = task_num - max_bar_width = self._get_max_bar_width() - self.bar_width = ( - bar_width if bar_width <= max_bar_width else max_bar_width) - self.completed = 0 - if start: - self.start() - - def _get_max_bar_width(self): - terminal_width, _ = get_terminal_size() - max_bar_width = min(int(terminal_width * 0.6), terminal_width - 50) - if max_bar_width < 10: - print(f'terminal width is too small ({terminal_width}), ' - 'please consider widen the terminal for better ' - 'progressbar visualization') - max_bar_width = 10 - return max_bar_width - - def start(self): - if self.task_num > 0: - sys.stdout.write(f"[{' ' * self.bar_width}] 0/{self.task_num}, " - f'elapsed: 0s, ETA:\nStart...\n') - else: - sys.stdout.write('completed: 0, elapsed: 0s') - sys.stdout.flush() - self.start_time = time.time() - - def update(self, msg='In progress...'): - self.completed += 1 - elapsed = time.time() - self.start_time - fps = self.completed / elapsed - if self.task_num > 0: - percentage = self.completed / float(self.task_num) - eta = int(elapsed * (1 - percentage) / percentage + 0.5) - mark_width = int(self.bar_width * percentage) - bar_chars = '>' * mark_width + '-' * (self.bar_width - mark_width) - sys.stdout.write('\033[2F') # cursor up 2 lines - sys.stdout.write( - '\033[J' - ) # clean the output (remove extra chars since last display) - sys.stdout.write( - f'[{bar_chars}] {self.completed}/{self.task_num}, ' - f'{fps:.1f} task/s, elapsed: {int(elapsed + 0.5)}s, ' - f'ETA: {eta:5}s\n{msg}\n') - else: - sys.stdout.write( - f'completed: {self.completed}, elapsed: {int(elapsed + 0.5)}s, ' - f'{fps:.1f} tasks/s') - sys.stdout.flush() - - -class AverageMeter(object): - """ - Computes and stores the average and current value - Imported from - https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262 - """ - - def __init__(self): - self.reset() - - def reset(self): - self.val = 0 - self.avg = 0 # running average = running sum / running count - self.sum = 0 # running sum - self.count = 0 # running count - - def update(self, val, n=1): - # n = batch_size - - # val = batch accuracy for an attribute - # self.val = val - - # sum = 100 * accumulative correct predictions for this attribute - self.sum += val * n - - # count = total samples so far - self.count += n - - # avg = 100 * avg accuracy for this attribute - # for all the batches so far - self.avg = self.sum / self.count diff --git a/spaces/CVPR/WALT/mmdet/datasets/utils.py b/spaces/CVPR/WALT/mmdet/datasets/utils.py deleted file mode 100644 index 157c9a2e1fe009552fdec9b9c9e7a33ed46d51ff..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/datasets/utils.py +++ /dev/null @@ -1,158 +0,0 @@ -import copy -import warnings - -from mmcv.cnn import VGG -from mmcv.runner.hooks import HOOKS, Hook - -from mmdet.datasets.builder import PIPELINES -from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile -from mmdet.models.dense_heads import GARPNHead, RPNHead -from mmdet.models.roi_heads.mask_heads import FusedSemanticHead - - -def replace_ImageToTensor(pipelines): - """Replace the ImageToTensor transform in a data pipeline to - DefaultFormatBundle, which is normally useful in batch inference. - - Args: - pipelines (list[dict]): Data pipeline configs. 
- - Returns: - list: The new pipeline list with all ImageToTensor replaced by - DefaultFormatBundle. - - Examples: - >>> pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict( - ... type='MultiScaleFlipAug', - ... img_scale=(1333, 800), - ... flip=False, - ... transforms=[ - ... dict(type='Resize', keep_ratio=True), - ... dict(type='RandomFlip'), - ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), - ... dict(type='Pad', size_divisor=32), - ... dict(type='ImageToTensor', keys=['img']), - ... dict(type='Collect', keys=['img']), - ... ]) - ... ] - >>> expected_pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict( - ... type='MultiScaleFlipAug', - ... img_scale=(1333, 800), - ... flip=False, - ... transforms=[ - ... dict(type='Resize', keep_ratio=True), - ... dict(type='RandomFlip'), - ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), - ... dict(type='Pad', size_divisor=32), - ... dict(type='DefaultFormatBundle'), - ... dict(type='Collect', keys=['img']), - ... ]) - ... ] - >>> assert expected_pipelines == replace_ImageToTensor(pipelines) - """ - pipelines = copy.deepcopy(pipelines) - for i, pipeline in enumerate(pipelines): - if pipeline['type'] == 'MultiScaleFlipAug': - assert 'transforms' in pipeline - pipeline['transforms'] = replace_ImageToTensor( - pipeline['transforms']) - elif pipeline['type'] == 'ImageToTensor': - warnings.warn( - '"ImageToTensor" pipeline is replaced by ' - '"DefaultFormatBundle" for batch inference. It is ' - 'recommended to manually replace it in the test ' - 'data pipeline in your config file.', UserWarning) - pipelines[i] = {'type': 'DefaultFormatBundle'} - return pipelines - - -def get_loading_pipeline(pipeline): - """Only keep loading image and annotations related configuration. - - Args: - pipeline (list[dict]): Data pipeline configs. - - Returns: - list[dict]: The new pipeline list with only keep - loading image and annotations related configuration. - - Examples: - >>> pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict(type='LoadAnnotations', with_bbox=True), - ... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - ... dict(type='RandomFlip', flip_ratio=0.5), - ... dict(type='Normalize', **img_norm_cfg), - ... dict(type='Pad', size_divisor=32), - ... dict(type='DefaultFormatBundle'), - ... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) - ... ] - >>> expected_pipelines = [ - ... dict(type='LoadImageFromFile'), - ... dict(type='LoadAnnotations', with_bbox=True) - ... ] - >>> assert expected_pipelines ==\ - ... get_loading_pipeline(pipelines) - """ - loading_pipeline_cfg = [] - for cfg in pipeline: - obj_cls = PIPELINES.get(cfg['type']) - # TODO:use more elegant way to distinguish loading modules - if obj_cls is not None and obj_cls in (LoadImageFromFile, - LoadAnnotations): - loading_pipeline_cfg.append(cfg) - assert len(loading_pipeline_cfg) == 2, \ - 'The data pipeline in your config file must include ' \ - 'loading image and annotations related pipeline.' - return loading_pipeline_cfg - - -@HOOKS.register_module() -class NumClassCheckHook(Hook): - - def _check_head(self, runner): - """Check whether the `num_classes` in head matches the length of - `CLASSSES` in `dataset`. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. 
- """ - model = runner.model - dataset = runner.data_loader.dataset - if dataset.CLASSES is None: - runner.logger.warning( - f'Please set `CLASSES` ' - f'in the {dataset.__class__.__name__} and' - f'check if it is consistent with the `num_classes` ' - f'of head') - else: - for name, module in model.named_modules(): - if hasattr(module, 'num_classes') and not isinstance( - module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)): - assert module.num_classes == len(dataset.CLASSES), \ - (f'The `num_classes` ({module.num_classes}) in ' - f'{module.__class__.__name__} of ' - f'{model.__class__.__name__} does not matches ' - f'the length of `CLASSES` ' - f'{len(dataset.CLASSES)}) in ' - f'{dataset.__class__.__name__}') - - def before_train_epoch(self, runner): - """Check whether the training dataset is compatible with head. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. - """ - self._check_head(runner) - - def before_val_epoch(self, runner): - """Check whether the dataset in val epoch is compatible with head. - - Args: - runner (obj:`EpochBasedRunner`): Epoch based Runner. - """ - self._check_head(runner) diff --git a/spaces/Chaitanya01/InvestingPlatform/patterns.py b/spaces/Chaitanya01/InvestingPlatform/patterns.py deleted file mode 100644 index 0be810b3df079e30c04651f86203f988cc819e8f..0000000000000000000000000000000000000000 --- a/spaces/Chaitanya01/InvestingPlatform/patterns.py +++ /dev/null @@ -1,64 +0,0 @@ -# Candlestick patterns mapping -patterns = { - 'CDL2CROWS':'Two Crows', - 'CDL3BLACKCROWS':'Three Black Crows', - 'CDL3INSIDE':'Three Inside Up/Down', - 'CDL3LINESTRIKE':'Three-Line Strike', - 'CDL3OUTSIDE':'Three Outside Up/Down', - 'CDL3STARSINSOUTH':'Three Stars In The South', - 'CDL3WHITESOLDIERS':'Three Advancing White Soldiers', - 'CDLABANDONEDBABY':'Abandoned Baby', - 'CDLADVANCEBLOCK':'Advance Block', - 'CDLBELTHOLD':'Belt-hold', - 'CDLBREAKAWAY':'Breakaway', - 'CDLCLOSINGMARUBOZU':'Closing Marubozu', - 'CDLCONCEALBABYSWALL':'Concealing Baby Swallow', - 'CDLCOUNTERATTACK':'Counterattack', - 'CDLDARKCLOUDCOVER':'Dark Cloud Cover', - 'CDLDOJI':'Doji', - 'CDLDOJISTAR':'Doji Star', - 'CDLDRAGONFLYDOJI':'Dragonfly Doji', - 'CDLENGULFING':'Engulfing Pattern', - 'CDLEVENINGDOJISTAR':'Evening Doji Star', - 'CDLEVENINGSTAR':'Evening Star', - 'CDLGAPSIDESIDEWHITE':'Up/Down-gap side-by-side white lines', - 'CDLGRAVESTONEDOJI':'Gravestone Doji', - 'CDLHAMMER':'Hammer', - 'CDLHANGINGMAN':'Hanging Man', - 'CDLHARAMI':'Harami Pattern', - 'CDLHARAMICROSS':'Harami Cross Pattern', - 'CDLHIGHWAVE':'High-Wave Candle', - 'CDLHIKKAKE':'Hikkake Pattern', - 'CDLHIKKAKEMOD':'Modified Hikkake Pattern', - 'CDLHOMINGPIGEON':'Homing Pigeon', - 'CDLIDENTICAL3CROWS':'Identical Three Crows', - 'CDLINNECK':'In-Neck Pattern', - 'CDLINVERTEDHAMMER':'Inverted Hammer', - 'CDLKICKING':'Kicking', - 'CDLKICKINGBYLENGTH':'Kicking - bull/bear determined by the longer marubozu', - 'CDLLADDERBOTTOM':'Ladder Bottom', - 'CDLLONGLEGGEDDOJI':'Long Legged Doji', - 'CDLLONGLINE':'Long Line Candle', - 'CDLMARUBOZU':'Marubozu', - 'CDLMATCHINGLOW':'Matching Low', - 'CDLMATHOLD':'Mat Hold', - 'CDLMORNINGDOJISTAR':'Morning Doji Star', - 'CDLMORNINGSTAR':'Morning Star', - 'CDLONNECK':'On-Neck Pattern', - 'CDLPIERCING':'Piercing Pattern', - 'CDLRICKSHAWMAN':'Rickshaw Man', - 'CDLRISEFALL3METHODS':'Rising/Falling Three Methods', - 'CDLSEPARATINGLINES':'Separating Lines', - 'CDLSHOOTINGSTAR':'Shooting Star', - 'CDLSHORTLINE':'Short Line Candle', - 'CDLSPINNINGTOP':'Spinning Top', - 'CDLSTALLEDPATTERN':'Stalled Pattern', 
- 'CDLSTICKSANDWICH':'Stick Sandwich', - 'CDLTAKURI':'Takuri (Dragonfly Doji with very long lower shadow)', - 'CDLTASUKIGAP':'Tasuki Gap', - 'CDLTHRUSTING':'Thrusting Pattern', - 'CDLTRISTAR':'Tristar Pattern', - 'CDLUNIQUE3RIVER':'Unique 3 River', - 'CDLUPSIDEGAP2CROWS':'Upside Gap Two Crows', - 'CDLXSIDEGAP3METHODS':'Upside/Downside Gap Three Methods' -} \ No newline at end of file diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/addiction/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/addiction/__init__.py deleted file mode 100644 index 650ea6fa7fa2b58d42daa8a6e0514aea8c5c8a50..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/addiction/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from pathlib import Path -from typing import List - -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.exception import TextOverLength -from meme_generator.utils import make_jpg_or_gif - -img_dir = Path(__file__).parent / "images" - - -def addiction(images: List[BuildImage], texts: List[str], args): - frame = BuildImage.open(img_dir / "0.png") - - if texts: - text = texts[0] - frame = frame.resize_canvas((246, 286), direction="north", bg_color="white") - try: - frame.draw_text((10, 246, 236, 286), texts[0], max_fontsize=45) - except ValueError: - raise TextOverLength(text) - - def make(img: BuildImage) -> BuildImage: - img = img.convert("RGBA").resize((91, 91), keep_ratio=True) - return frame.copy().paste(img, alpha=True) - - return make_jpg_or_gif(images[0], make) - - -add_meme( - "addiction", - addiction, - min_images=1, - max_images=1, - min_texts=0, - max_texts=1, - keywords=["上瘾", "毒瘾发作"], -) diff --git a/spaces/ClinBAY/Safeterm_Demo/app.py b/spaces/ClinBAY/Safeterm_Demo/app.py deleted file mode 100644 index 4b0b3d107f7bd3924bb92ea4e04a657f3627db42..0000000000000000000000000000000000000000 --- a/spaces/ClinBAY/Safeterm_Demo/app.py +++ /dev/null @@ -1,410 +0,0 @@ -import gradio as gr -import requests -import json -from send_email_request import send_email - - -def request_api_key_form(req_email, req_name, req_organization, req_meddra_license, req_agree_terms, req_save_data): - # Check if all required fields are filled and the conditions are met - if not req_email or not req_name or not req_organization: - return "**Please fill in all required fields.**" - if "@" not in req_email or "." not in req_email: - return "**Please enter a valid email.**" - if not req_meddra_license: - return "**You need to have a valid MedDRA license.**" - if not req_agree_terms: - return "**You need to agree to Safeterm terms of use.**" - if not req_save_data: - return "**You need to agree to share data for training and communication purposes.**" - - subject = "API Key Request for Safeterm" - - send_email(subject, req_email, req_name, req_organization, req_meddra_license, req_agree_terms, req_save_data) - - return "**Your request for API key has been submitted successfully! We will send it to you via mail soon!**" - - -def encode_caller(apikey, reported_terms_encoder, validation_request, single_term): - if not apikey: - return "Please enter a valid API key!" 
- - url = "https://safeterm.proddis.com/meddra_encode" - reported_terms_list = [term.strip() for term in reported_terms_encoder.split(",")] - - # Convert string values to boolean - validation_request_bool = validation_request == "True" - single_term_bool = single_term == "True" - - payload = json.dumps({ - "reported_terms": reported_terms_list, - "nmax": 1, - "version": float("26.0"), - "validation_request": validation_request_bool, - "single_term": single_term_bool - }) - - headers = { - 'Content-Type': 'application/json', - 'Authorization': f'Bearer {apikey}' - } - - response = requests.post(url, headers=headers, data=payload) - data = response.json() - - if "detail" in data: - return data["detail"] - - results = [] - - for encoded_term in data.get("encoded_terms", []): - result = f"Reported Term: {encoded_term['term']}\n" - - # Check if 'encoded_term' contains a list of dictionaries (valid_request = False) - if isinstance(encoded_term['encoded_term'][0], dict): - for sub_term_entry in encoded_term['encoded_term']: - for llt_id, llt_term in zip(sub_term_entry['llt_id'], sub_term_entry['llt_term']): - result += f"LLT ID: {llt_id}\nLLT Term: {llt_term}\n" - - pt_id = sub_term_entry['pt_id'][0] - pt_term = sub_term_entry['pt_term'][0] - result += f"PT ID: {pt_id}\nPT Term: {pt_term}\n" - result += "---\n" - - # Case for valid_request = True (list of lists) - else: - for sub_list in encoded_term['encoded_term']: - for sub_term_entry in sub_list: - if 'llt_id' in sub_term_entry and 'llt_term' in sub_term_entry: - for llt_id, llt_term in zip(sub_term_entry['llt_id'], sub_term_entry['llt_term']): - result += f"LLT ID: {llt_id}\nLLT Term: {llt_term}\n" - - pt_id = sub_term_entry['pt_id'][0] - pt_term = sub_term_entry['pt_term'][0] - result += f"PT ID: {pt_id}\nPT Term: {pt_term}\n" - result += "---\n" - - # Handle other entries like "is_validated" and "report" - else: - for key, value in sub_term_entry.items(): - result += f"{key}: {value}\n" - result += "---\n" - - result += f"Status: {encoded_term['status']}\n\n" - results.append(result) - - # Add the API messages at the end. - api_message = data.get("messages", "No API message available") - api_message = "OK" if api_message is None else api_message - results.append(f"API Message: {api_message}") - - return "\n".join(results) - - -def validate_caller(val_apikey, val_reported_terms, val_llt_terms): - if not val_apikey: - return "Please enter a valid API key!" 
- url = "https://safeterm.proddis.com/meddra_validate" - - # Convert comma-separated strings into lists - val_reported_terms_list = [term.strip() for term in val_reported_terms.split(",")] - val_llt_terms_list = [term.strip() for term in val_llt_terms.split(",")] - - payload = json.dumps({ - "reported_terms": val_reported_terms_list, - "llt_terms": val_llt_terms_list, - "nmax": 1, - "version": float("26.0") - }) - headers = { - 'Content-Type': 'application/json', - 'Authorization': f'Bearer {val_apikey}' - } - - response = requests.request("POST", url, headers=headers, data=payload, timeout=60) - - data = response.json() - - # Check if the response contains an error detail - if "detail" in data: - return data["detail"] - - results = [] - for validation_item in data["responses"]: - report = validation_item.get("report", "No report") - - if validation_item["best_dict_term"]: - llt_id = validation_item["best_dict_term"][0]["llt_id"][0] - llt_term = validation_item["best_dict_term"][0]["llt_term"][0] - pt_id = validation_item["best_dict_term"][0]["pt_id"][0] - pt_term = validation_item["best_dict_term"][0]["pt_term"][0] - - result = f"Report: {report}\n" - result += f"LLT ID: {llt_id}\n" - result += f"LLT Term: {llt_term}\n" - result += f"PT ID: {pt_id}\n" - result += f"PT Term: {pt_term}\n\n" - else: - result = f"Report: {report}\nNo matching terms found.\n\n" - - results.append(result) - - # Add the API messages at the end. - api_message = data.get("messages", "No API message available") - api_message = "OK" if api_message is None else api_message - results.append(f"API key status: {api_message}") - - return "\n".join(results) - - -def upgrade_caller(api_key, upgrader_list_of_terms, version_upgrade): - if not api_key: - return "Please enter a valid API key!" - url = "https://safeterm.proddis.com/meddra_version_upgrade" - - cleaned_terms = [term.strip() for term in upgrader_list_of_terms.split(",")] - - payload = json.dumps({ - "list_of_terms": cleaned_terms, - "nmax": 1, - "version": float(version_upgrade), - "verbose": True - }) - headers = { - 'Content-Type': 'application/json', - 'Authorization': f'Bearer {api_key}' - } - response = requests.request("POST", url, headers=headers, data=payload) - - data = response.json() - - # Check if the response contains an error detail - if "detail" in data: - return data["detail"] - - output = [] - results = data.get("result", []) - for i, result in enumerate(results): - input_term = cleaned_terms[i] # Assuming the order in 'results' and 'cleaned_terms' is the same - - if result['change_status'] is None or result['output_term'] is None: - status_message = result.get('status', 'Unknown Status') - output.append(f"Input Term: {input_term}\nStatus: {status_message}\n") - else: - status = "Unchanged" if not result['change_status'] else "Changed" - output_term = result['output_term'] - output.append(f"Input Term: {input_term}\nStatus: {status}\nOutput Term: {output_term}\n") - - # Get the API message, set to "OK" if it's None - api_message = data.get("messages", "No API message available") - api_message = "OK" if api_message is None else api_message - - output.append(f"API status: {api_message}") - - return "\n".join(output).strip() # Removing any trailing whitespace - - -def llt_current_caller(api_key, currency_list_of_terms, version_check): - if not api_key: - return "Please enter a valid API key!" 
- - url = "https://safeterm.proddis.com/meddra_llt_is_current" - cleaned_terms_currency = [term.strip() for term in currency_list_of_terms.split(",")] - - payload = json.dumps({ - "list_of_terms": cleaned_terms_currency, - "version": float(version_check) - }) - - headers = { - 'Content-Type': 'application/json', - 'Authorization': f'Bearer {api_key}' - } - - response = requests.request("POST", url, headers=headers, data=payload) - response_data = response.json() - - # Check if 'is_current_flag_results' key exists in the API response - if "is_current_flag_results" not in response_data: - # If 'detail' key exists in response_data, show the error detail, else show a generic error - return response_data.get("detail", "An error occurred.") - - beautified_output = [] - for term, flag_data in zip(cleaned_terms_currency, response_data["is_current_flag_results"]): - flag = flag_data.get('is_current_flag', None) - status = flag_data.get('status', '') - - if isinstance(flag, bool): - status_text = "Is Current" if flag else "Is not Current" - beautified_output.append(f"{term}: {status_text}") - - else: - beautified_output.append(f"Error {term}: {status}") - - # Get the API message, set to "OK" if it's None - api_message = response_data.get("messages", None) - api_message = "OK" if api_message is None else api_message - beautified_output.append(f"API status: {api_message}") - - return "\n".join(beautified_output) - - -with gr.Blocks(css=".gradio-container {background-color: lightgray}") as demo: - # gr.Markdown("Safeterm is an automated AI system that extracts medical terms from patient narratives and - # standardize these terms according to the medical dictionary for regulatory applications (MedDRA). ") - gr.Markdown("### Safeterm: Translate Medical Narratives into Standardized Dictionaries") - with gr.Row(): - with gr.Column(): - gr.HTML( - """

Safeterm is an automated AI system that extracts medical terms from patient narratives and - standardize these terms according to the medical dictionary for regulatory applications (MedDRA). -

""") - with gr.Column(): - universal_api_key_input = gr.Textbox(label="Paste your API Key", placeholder="Enter your API Key...", - lines=1) - with gr.Row(): - with gr.Tab("Safeterm Encode"): - gr.Markdown("### Safeterm Encode") - - # Inputs - encode_reported_terms = gr.Dropdown( - ["Headache, Allergic to CAT scan", "Myocardial infarction in the fall of 2000", - "Nurse miscalculated the dose and the patient received 40 mg instead of 20 mg of his medication. He " - "experienced severe hypotension and heart attack."], - label="Medical Narratives (comma-separated, max 5)", - info="Enter your reported medical narratives (1 or more comma-separated statements, example: " - "Headache, Allergic to CAT scan) here or choose from the presets.", - allow_custom_value=True) - - encode_version = gr.Textbox(label="MedDRA Version", value="26.0") - - # New input boxes - validation_request = gr.Radio(label="Validation Request", choices=["True", "False"]) - single_term = gr.Radio(label="Single Term", choices=["True", "False"]) - - # Output - api_response_encode = gr.Textbox(label="Standardized Medical Dictionary Outputs") - - # Button to trigger API call - submit_button = gr.Button("Encode into MedDRA") - - # Binding API call function to button (Note: Assuming `universal_api_key_input` was defined somewhere - # before this block) - submit_button.click(encode_caller, - inputs=[universal_api_key_input, encode_reported_terms, validation_request, - single_term], - outputs=api_response_encode) - - with gr.Tab("Safeterm Validate"): - gr.Markdown('### Safeterm Validate') - gr.Markdown( - 'Checks an existing MedDRA encoding. Compares reported terms to MedDRA Lower Level Terms (LLT). ' - 'Provides an alternative LLT in case of discrepancy.') - # Validator Inputs - reported_terms_validate = gr.Dropdown(["Pain in the head, Feeling nauseous"] - , label="Reported Terms (comma-separated, max 5)" - , - info="Enter your reported terms here (example: pain in the " - "head, Feeling nauseous) or choose from the dropdown preset", - allow_custom_value=True) - llt_terms_validate = gr.Dropdown(["Headache, Vomiting"] - , label="LLT terms (comma-separated, max 5)" - , - info="Enter the current LLTs here (example: Headache, Vomitting), " - "or choose" - "from the dropdown presets", - allow_custom_value=True) - version_validate = gr.Textbox(label="MedDRA Version", value="26.0") - - # Output - api_response_validate = gr.Textbox(label="Validation Report") - - # Button to trigger API call and Binding API call function to button for Validator - submit_button_validate = gr.Button("Validate Existing MedDRA Encoding") - submit_button_validate.click(validate_caller, - inputs=[universal_api_key_input, reported_terms_validate, llt_terms_validate], - outputs=api_response_validate) - with gr.Tab("Safeterm Version Upgrade"): - gr.Markdown("### Safeterm Version Upgrade") - gr.Markdown('Upgrade terms to the most up-to-date LLTs in a MedDRA version.') - # Version Upgrade Inputs - list_of_terms_upgrade = gr.Dropdown( - ["Pain in the head", "Injection site joint inflammation", "Bone chip removal"] - , label="LLTs (comma-separated, max 5)" - , info="Enter your LLTs here (example: Injection site joint inflammation) " - "or choose from the dropdown preset", - allow_custom_value=True) - - version_upgrade = gr.Dropdown(label="To MedDRA Version", choices=["20.0", "26.0"]) - - # Output - api_response_upgrade = gr.Textbox(label="Version Upgrade Response") - - # Button to trigger API call and Binding API call function to button for Version Upgrade - 
submit_button_upgrade = gr.Button("Upgrade to Selected MedDRA Version") - submit_button_upgrade.click(upgrade_caller, - inputs=[universal_api_key_input, list_of_terms_upgrade, - version_upgrade], outputs=api_response_upgrade) - - with gr.Tab("Safeterm Current Check"): # Currency Checker section - gr.Markdown("### Safeterm Current Version Check") - gr.Markdown('Checks if LLTs are current or not in a particular MedDRA version') - # No Change Version Check Inputs - list_of_terms_validity = gr.Dropdown(["Anemia iron deficiency", "COVID-19"] - , label="LLTs (comma-separated, max 5)" - , info="Enter your LLTs here, e.g.: Injection site " - "joint inflammation or choose from the dropdown preset", - allow_custom_value=True) - version_check = gr.Dropdown(label="To MedDRA Version", choices=["20.0", "26.0"]) - - # Output for No Change Version Check - api_response_current = gr.Textbox(label="Current Check Response") - - # Button to trigger API call and Binding API call function to button for No Change Check - submit_button_currency = gr.Button("Check if LLT is current or not") - submit_button_currency.click(llt_current_caller, - inputs=[universal_api_key_input, list_of_terms_validity, version_check], - outputs=api_response_current) - - with gr.Tab("Request an API key"): - # gr.Markdown("### Safeterm Settings") - gr.Markdown("### Request an API key") - free_demo = gr.Markdown( - "Submit your request for a free demo of our API (Expires in 30 days. 50 terms limit)") - # gr.Markdown("### MedDRA Dictionary") - version = gr.Markdown("Safeterm Model v-082023") - language = gr.Markdown("Language: English") # Dropdown for language - - gr.Markdown("### Contact information") - email = gr.Textbox(label="Email", placeholder="Enter your professional email address...") - name = gr.Textbox(label="First and Last Name", placeholder="Enter your full name...") - organization = gr.Textbox(label="Organization", placeholder="Enter your organization name and details...") - - gr.Markdown("### Terms of use") - # MedDRA_license = gr.Checkbox(label="I confirm that my organization has a valid MedDRA license.") - with gr.Row(): - MedDRA_license = gr.Checkbox(label="I confirm that my organization has a valid MedDRA license: ") - gr.HTML("""[link]""") - with gr.Row(): - agree_terms = gr.Checkbox(label="I agree to Safeterm terms of use: ", min_width=1, scale=1) - gr.HTML("""[link] """) - - with gr.Row(): - save_data = gr.Checkbox( - label="I consent to the storage of my personal data for training and communication purposes.") - gr.HTML(""" """) - - feedback_textbox = gr.Markdown(label="Feedback") - - # Button for API key request and Binding function to button - api_key_button = gr.Button("Submit request for API key") - api_key_button.click( - request_api_key_form, - inputs=[email, name, organization, MedDRA_license, agree_terms, save_data], - outputs=feedback_textbox - ) - with gr.Row(): - gr.Markdown( - "Safeterm API is distributed by ClinBAY Limited. 
\t For any enquiry, feel free to contact us at "
-                "info@clinbay.com")
-
-demo.launch()
diff --git a/spaces/Conner/IAPdemo/app.py b/spaces/Conner/IAPdemo/app.py
deleted file mode 100644
index 73c910113d114ac91401de11514ae5a0cada706a..0000000000000000000000000000000000000000
--- a/spaces/Conner/IAPdemo/app.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from tensorflow import keras
-from keras.models import load_model
-from PIL import Image, ImageOps
-import numpy as np
-import gradio as gr
-
-
-# Load the model
-model = load_model('keras_model.h5')
-
-
-def greet(name):
-    return "Hello " + name + "!!"
-
-
-def predict(image):
-    # Create the array of the right shape to feed into the keras model.
-    # The 'length' or number of images you can put into the array is
-    # determined by the first position in the shape tuple, in this case 1.
-    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
-
-    # Gradio's "image" input arrives as a numpy array; convert it to a
-    # PIL image so it can be resized and cropped with ImageOps.
-    image = Image.fromarray(np.uint8(image))
-
-    # Resize the image to 224x224 with the same strategy as in TM2:
-    # resize to be at least 224x224, then crop from the center.
-    size = (224, 224)
-    image = ImageOps.fit(image, size, Image.LANCZOS)
-
-    # Turn the image back into a numpy array
-    image_array = np.asarray(image)
-    # Normalize the image into the [-1, 1] range the model expects
-    normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
-    # Load the image into the array
-    data[0] = normalized_image_array
-
-    # Run the inference
-    prediction = model.predict(data)
-    print(prediction)
-    return str(prediction)
-
-
-iface = gr.Interface(fn=predict, inputs="image", outputs="text")
-iface.launch()
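The original `predict` above assigned a Gradio component object over the incoming image and passed rasterio's resampling enum to PIL, so it could never have run; the version kept here converts the incoming array to a PIL image and uses PIL's own LANCZOS filter. A quick, self-contained sanity check of that preprocessing (the random array is only a stand-in for a real photo):

```python
import numpy as np
from PIL import Image, ImageOps

# Stand-in for a photo delivered by Gradio's "image" input
fake_photo = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

# Center-crop to 224x224, then scale pixels into the model's expected range
image = ImageOps.fit(Image.fromarray(fake_photo), (224, 224), Image.LANCZOS)
normalized = (np.asarray(image).astype(np.float32) / 127.0) - 1

assert normalized.shape == (224, 224, 3)
# Values lie in [-1, 255/127 - 1]
assert normalized.min() >= -1.0 and normalized.max() <= 255 / 127 - 1
```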
diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/csrc/cpu/vision.h b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/csrc/cpu/vision.h
deleted file mode 100644
index 19539caf9c5aa8b8025f786c3e54e23de300cf5e..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/csrc/cpu/vision.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-#pragma once
-#include <torch/extension.h>
-
-
-at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
-                                const at::Tensor& rois,
-                                const float spatial_scale,
-                                const int pooled_height,
-                                const int pooled_width,
-                                const int sampling_ratio);
-
-
-at::Tensor nms_cpu(const at::Tensor& dets,
-                   const at::Tensor& scores,
-                   const float threshold);
-at::Tensor
-dcn_v2_cpu_forward(const at::Tensor &input,
-                   const at::Tensor &weight,
-                   const at::Tensor &bias,
-                   const at::Tensor &offset,
-                   const at::Tensor &mask,
-                   const int kernel_h,
-                   const int kernel_w,
-                   const int stride_h,
-                   const int stride_w,
-                   const int pad_h,
-                   const int pad_w,
-                   const int dilation_h,
-                   const int dilation_w,
-                   const int deformable_group);
-
-std::vector<at::Tensor>
-dcn_v2_cpu_backward(const at::Tensor &input,
-                    const at::Tensor &weight,
-                    const at::Tensor &bias,
-                    const at::Tensor &offset,
-                    const at::Tensor &mask,
-                    const at::Tensor &grad_output,
-                    int kernel_h, int kernel_w,
-                    int stride_h, int stride_w,
-                    int pad_h, int pad_w,
-                    int dilation_h, int dilation_w,
-                    int deformable_group);
-
-
-std::tuple<at::Tensor, at::Tensor>
-dcn_v2_psroi_pooling_cpu_forward(const at::Tensor &input,
-                                 const at::Tensor &bbox,
-                                 const at::Tensor &trans,
-                                 const int no_trans,
-                                 const float spatial_scale,
-                                 const int output_dim,
-                                 const int group_size,
-                                 const int pooled_size,
-                                 const int part_size,
-                                 const int sample_per_part,
-                                 const float trans_std);
-
-std::tuple<at::Tensor, at::Tensor>
-dcn_v2_psroi_pooling_cpu_backward(const at::Tensor &out_grad,
-                                  const at::Tensor &input,
-                                  const at::Tensor &bbox,
-                                  const at::Tensor &trans,
-                                  const at::Tensor &top_count,
-                                  const int no_trans,
-                                  const float spatial_scale,
-                                  const int output_dim,
-                                  const int group_size,
-                                  const int pooled_size,
-                                  const int part_size,
-                                  const int sample_per_part,
-                                  const float trans_std);
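`nms_cpu` above declares standard non-maximum suppression over detection boxes and scores. The deleted space compiles it into its own extension module, which this diff does not show; as a reference point, the same operation is available off the shelf in torchvision (using torchvision here is an assumption, not part of the deleted code):

```python
import torch
from torchvision.ops import nms

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],     # box 0, best score
                      [1.0, 1.0, 11.0, 11.0],     # box 1, overlaps box 0 heavily
                      [50.0, 50.0, 60.0, 60.0]])  # box 2, far away
scores = torch.tensor([0.9, 0.8, 0.7])

# Drop any box whose IoU with a higher-scoring kept box exceeds 0.5
keep = nms(boxes, scores, iou_threshold=0.5)
print(keep)  # tensor([0, 2])
```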
return "{}.enable({!r})".format(self.registry.__class__.__name__, self.name) - - -class PluginRegistry(Generic[PluginType]): - """A registry for plugins. - - This is a plugin registry that allows plugins to be loaded/registered - in two ways: - - 1. Through an explicit call to ``.register(name, value)``. - 2. By looking for other Python packages that are installed and provide - a setuptools entry point group. - - When you create an instance of this class, provide the name of the - entry point group to use:: - - reg = PluginRegister('my_entrypoint_group') - - """ - - # this is a mapping of name to error message to allow custom error messages - # in case an entrypoint is not found - entrypoint_err_messages = {} # type: Dict[str, str] - - # global settings is a key-value mapping of settings that are stored globally - # in the registry rather than passed to the plugins - _global_settings = {} # type: Dict[str, Any] - - def __init__(self, entry_point_group: str = "", plugin_type: type = object): - """Create a PluginRegistry for a named entry point group. - - Parameters - ========== - entry_point_group: str - The name of the entry point group. - plugin_type: object - A type that will optionally be used for runtime type checking of - loaded plugins using isinstance. - """ - self.entry_point_group = entry_point_group # type: str - self.plugin_type = plugin_type # type: Optional[type] - self._active = None # type: Optional[PluginType] - self._active_name = "" # type: str - self._plugins = {} # type: Dict[str, PluginType] - self._options = {} # type: Dict[str, Any] - self._global_settings = self.__class__._global_settings.copy() # type: dict - - def register(self, name: str, value: Optional[PluginType]) -> Optional[PluginType]: - """Register a plugin by name and value. - - This method is used for explicit registration of a plugin and shouldn't be - used to manage entry point managed plugins, which are auto-loaded. - - Parameters - ========== - name: str - The name of the plugin. - value: PluginType or None - The actual plugin object to register or None to unregister that plugin. - - Returns - ======= - plugin: PluginType or None - The plugin that was registered or unregistered. 
- """ - if value is None: - return self._plugins.pop(name, None) - else: - assert isinstance(value, self.plugin_type) # type: ignore[arg-type] # Should ideally be fixed by better annotating plugin_type - self._plugins[name] = value - return value - - def names(self) -> List[str]: - """List the names of the registered and entry points plugins.""" - exts = list(self._plugins.keys()) - e_points = importlib_metadata_get(self.entry_point_group) - more_exts = [ep.name for ep in e_points] - exts.extend(more_exts) - return sorted(set(exts)) - - def _get_state(self) -> Dict[str, Any]: - """Return a dictionary representing the current state of the registry""" - return { - "_active": self._active, - "_active_name": self._active_name, - "_plugins": self._plugins.copy(), - "_options": self._options.copy(), - "_global_settings": self._global_settings.copy(), - } - - def _set_state(self, state: Dict[str, Any]) -> None: - """Reset the state of the registry""" - assert set(state.keys()) == { - "_active", - "_active_name", - "_plugins", - "_options", - "_global_settings", - } - for key, val in state.items(): - setattr(self, key, val) - - def _enable(self, name: str, **options) -> None: - if name not in self._plugins: - try: - (ep,) = [ - ep - for ep in importlib_metadata_get(self.entry_point_group) - if ep.name == name - ] - except ValueError as err: - if name in self.entrypoint_err_messages: - raise ValueError(self.entrypoint_err_messages[name]) from err - else: - raise NoSuchEntryPoint(self.entry_point_group, name) from err - value = cast(PluginType, ep.load()) - self.register(name, value) - self._active_name = name - self._active = self._plugins[name] - for key in set(options.keys()) & set(self._global_settings.keys()): - self._global_settings[key] = options.pop(key) - self._options = options - - def enable(self, name: Optional[str] = None, **options) -> PluginEnabler: - """Enable a plugin by name. - - This can be either called directly, or used as a context manager. - - Parameters - ---------- - name : string (optional) - The name of the plugin to enable. If not specified, then use the - current active name. 
- **options : - Any additional parameters will be passed to the plugin as keyword - arguments - - Returns - ------- - PluginEnabler: - An object that allows enable() to be used as a context manager - """ - if name is None: - name = self.active - return PluginEnabler(self, name, **options) - - @property - def active(self) -> str: - """Return the name of the currently active plugin""" - return self._active_name - - @property - def options(self) -> Dict[str, Any]: - """Return the current options dictionary""" - return self._options - - def get(self) -> Optional[PluginType]: - """Return the currently active plugin.""" - if self._options: - return curry(self._active, **self._options) - else: - return self._active - - def __repr__(self) -> str: - return "{}(active={!r}, registered={!r})" "".format( - self.__class__.__name__, self._active_name, list(self.names()) - ) - - -def importlib_metadata_get(group): - ep = entry_points() - # 'select' was introduced in Python 3.10 and 'get' got deprecated - # We don't check for Python version here as by checking with hasattr we - # also get compatibility with the importlib_metadata package which had a different - # deprecation cycle for 'get' - if hasattr(ep, "select"): - return ep.select(group=group) - else: - return ep.get(group, []) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-1e03cd90.css b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-1e03cd90.css deleted file mode 100644 index 6692555db405e6eb83d0671b1ef9922ee30770d3..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-1e03cd90.css +++ /dev/null @@ -1 +0,0 @@ -.preview.svelte-w0jac3.svelte-w0jac3{display:flex;position:absolute;inset:0;flex-direction:column;z-index:var(--layer-2);backdrop-filter:blur(8px);background:var(--background-fill-primary);height:var(--size-full)}.fixed-height.svelte-w0jac3.svelte-w0jac3{min-height:var(--size-80);max-height:55vh}@media (min-width: 1280px){.fixed-height.svelte-w0jac3.svelte-w0jac3{min-height:450px}}.preview.svelte-w0jac3 img.svelte-w0jac3{width:var(--size-full);height:calc(var(--size-full) - 60px);object-fit:contain}.preview.svelte-w0jac3 img.with-caption.svelte-w0jac3{height:calc(var(--size-full) - 80px)}.caption.svelte-w0jac3.svelte-w0jac3{padding:var(--size-2) var(--size-3);overflow:hidden;color:var(--block-label-text-color);font-weight:var(--weight-semibold);text-align:center;text-overflow:ellipsis;white-space:nowrap}.thumbnails.svelte-w0jac3.svelte-w0jac3{display:flex;position:absolute;bottom:0;justify-content:center;align-items:center;gap:var(--spacing-lg);width:var(--size-full);height:var(--size-14);overflow-x:scroll}.thumbnail-item.svelte-w0jac3.svelte-w0jac3{--ring-color:transparent;position:relative;box-shadow:0 0 0 2px var(--ring-color),var(--shadow-drop);border:1px solid 
var(--border-color-primary);border-radius:var(--button-small-radius);background:var(--background-fill-secondary);aspect-ratio:var(--ratio-square);width:var(--size-full);height:var(--size-full);overflow:clip}.thumbnail-item.svelte-w0jac3.svelte-w0jac3:hover{--ring-color:var(--color-accent);filter:brightness(1.1)}.thumbnail-item.selected.svelte-w0jac3.svelte-w0jac3{--ring-color:var(--color-accent)}.thumbnail-small.svelte-w0jac3.svelte-w0jac3{flex:none;transform:scale(.9);transition:75ms;width:var(--size-9);height:var(--size-9)}.thumbnail-small.selected.svelte-w0jac3.svelte-w0jac3{--ring-color:var(--color-accent);transform:scale(1);border-color:var(--color-accent)}.thumbnail-small.svelte-w0jac3>img.svelte-w0jac3{width:var(--size-full);height:var(--size-full);overflow:hidden;object-fit:var(--object-fit)}.grid-wrap.svelte-w0jac3.svelte-w0jac3{position:relative;padding:var(--size-2);height:var(--size-full);overflow-y:auto}.grid-container.svelte-w0jac3.svelte-w0jac3{display:grid;position:relative;grid-template-rows:var(--grid-rows);grid-template-columns:var(--grid-cols);gap:var(--spacing-lg)}@media (min-width: 640px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--sm-grid-cols)}}@media (min-width: 768px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--md-grid-cols)}}@media (min-width: 1024px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--lg-grid-cols)}}@media (min-width: 1280px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--xl-grid-cols)}}@media (min-width: 1536px){.grid-container.svelte-w0jac3.svelte-w0jac3{grid-template-columns:var(--2xl-grid-cols)}}.thumbnail-lg.svelte-w0jac3>img.svelte-w0jac3{width:var(--size-full);height:var(--size-full);overflow:hidden;object-fit:var(--object-fit)}.thumbnail-lg.svelte-w0jac3:hover .caption-label.svelte-w0jac3{opacity:.5}.caption-label.svelte-w0jac3.svelte-w0jac3{position:absolute;right:var(--block-label-margin);bottom:var(--block-label-margin);z-index:var(--layer-1);border-top:1px solid var(--border-color-primary);border-left:1px solid var(--border-color-primary);border-radius:var(--block-label-radius);background:var(--background-fill-secondary);padding:var(--block-label-padding);max-width:80%;overflow:hidden;font-size:var(--block-label-text-size);text-align:left;text-overflow:ellipsis;white-space:nowrap}.icon-button.svelte-w0jac3.svelte-w0jac3{position:absolute;top:0;right:0;z-index:var(--layer-1)} diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-28bbfef4.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-28bbfef4.js deleted file mode 100644 index 2a4bcaa57ee83c3546d34ed1402158e00c5c76a6..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-28bbfef4.js +++ /dev/null @@ -1,3 +0,0 @@ -import{S as F,e as G,s as J,J as Z,K as _,p as k,M as w,n as A,A as v,N as T,B as oe,av as P,G as N,O as C,V as K,P as I,R as q,Q as S,a1 as ue,U as j,L,k as O,o as V,z as H,v as M,x as z,E as _e,ae as me,m as de,q as ge,r as he,u as Q,y as U,F as be}from"./index-1d65707a.js";import{g as ke}from"./color-90ab3aab.js";import{B as ve}from"./Button-f155035a.js";import{B as pe}from"./BlockLabel-66866176.js";import{E as we}from"./Empty-eec13822.js";function ye(t){let e,n,l;return{c(){e=Z("svg"),n=Z("path"),l=Z("path"),_(n,"fill","currentColor"),_(n,"d","M12 15H5a3 3 0 0 1-3-3v-2a3 3 0 0 1 3-3h5V5a1 1 0 0 
0-1-1H3V2h6a3 3 0 0 1 3 3zM5 9a1 1 0 0 0-1 1v2a1 1 0 0 0 1 1h5V9zm15 14v2a1 1 0 0 0 1 1h5v-4h-5a1 1 0 0 0-1 1z"),_(l,"fill","currentColor"),_(l,"d","M2 30h28V2Zm26-2h-7a3 3 0 0 1-3-3v-2a3 3 0 0 1 3-3h5v-2a1 1 0 0 0-1-1h-6v-2h6a3 3 0 0 1 3 3Z"),_(e,"xmlns","http://www.w3.org/2000/svg"),_(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),_(e,"aria-hidden","true"),_(e,"role","img"),_(e,"class","iconify iconify--carbon"),_(e,"width","100%"),_(e,"height","100%"),_(e,"preserveAspectRatio","xMidYMid meet"),_(e,"viewBox","0 0 32 32")},m(s,o){k(s,e,o),w(e,n),w(e,l)},p:A,i:A,o:A,d(s){s&&v(e)}}}class ae extends F{constructor(e){super(),G(this,e,null,ye,J,{})}}function Y(t,e,n){const l=t.slice();l[18]=e[n][0],l[24]=e[n][1];const s=typeof l[24]=="string"?parseInt(l[24]):l[24];return l[25]=s,l}function W(t,e,n){const l=t.slice();return l[18]=e[n][0],l[19]=e[n][1],l[21]=n,l}function X(t,e,n){const l=t.slice();return l[19]=e[n][0],l[22]=e[n][1],l[21]=n,l}function He(t){let e,n,l=t[1]&&x(),s=N(t[0]),o=[];for(let a=0;a-1 0 +1",_(e,"class","color-legend svelte-19on2m6"),_(e,"data-testid","highlighted-text:color-legend")},m(n,l){k(n,e,l)},d(n){n&&v(e)}}}function $(t){let e,n,l=t[18]+"",s,o,a;return{c(){e=T("span"),n=T("span"),s=I(l),o=C(),_(n,"class","text svelte-19on2m6"),_(e,"class","textspan score-text svelte-19on2m6"),_(e,"style",a="background-color: rgba("+(t[25]<0?"128, 90, 213,"+-t[25]:"239, 68, 60,"+t[25])+")")},m(r,i){k(r,e,i),w(e,n),w(n,s),w(e,o)},p(r,i){i&1&&l!==(l=r[18]+"")&&q(s,l),i&1&&a!==(a="background-color: rgba("+(r[25]<0?"128, 90, 213,"+-r[25]:"239, 68, 60,"+r[25])+")")&&_(e,"style",a)},d(r){r&&v(e)}}}function ee(t){let e,n=N(Object.entries(t[3])),l=[];for(let s=0;sc(h),E=h=>c(h),m=()=>g(),ie=()=>g(),re=(h,p,y)=>{d("select",{index:h,value:[p,y]})};return t.$$set=h=>{"value"in h&&n(0,s=h.value),"show_legend"in h&&n(1,o=h.show_legend),"color_map"in h&&n(9,a=h.color_map),"selectable"in h&&n(2,r=h.selectable)},t.$$.update=()=>{if(t.$$.dirty&513){let h=function(){for(const p in a){const y=a[p].trim();y in P?n(3,f[p]=P[y],f):n(3,f[p]={primary:l?b(a[p],1):a[p],secondary:l?b(a[p],.5):a[p]},f)}};if(a||n(9,a={}),s.length>0){for(let[p,y]of s)if(y!==null)if(typeof y=="string"){if(n(5,B="categories"),!(y in a)){let D=ke(Object.keys(a).length);n(9,a[y]=D,a)}}else n(5,B="scores")}h()}},[s,o,r,f,u,B,d,c,g,a,R,E,m,ie,re]}class je extends F{constructor(e){super(),G(this,e,Be,Me,J,{value:0,show_legend:1,color_map:9,selectable:2})}}function se(t){let e,n;return e=new pe({props:{Icon:ae,label:t[6],float:!1,disable:t[7]===!1}}),{c(){O(e.$$.fragment)},m(l,s){V(e,l,s),n=!0},p(l,s){const o={};s&64&&(o.label=l[6]),s&128&&(o.disable=l[7]===!1),e.$set(o)},i(l){n||(H(e.$$.fragment,l),n=!0)},o(l){M(e.$$.fragment,l),n=!1},d(l){z(e,l)}}}function Ce(t){let e,n;return e=new we({props:{$$slots:{default:[Ne]},$$scope:{ctx:t}}}),{c(){O(e.$$.fragment)},m(l,s){V(e,l,s),n=!0},p(l,s){const o={};s&32768&&(o.$$scope={dirty:s,ctx:l}),e.$set(o)},i(l){n||(H(e.$$.fragment,l),n=!0)},o(l){M(e.$$.fragment,l),n=!1},d(l){z(e,l)}}}function Ee(t){let e,n;return e=new je({props:{selectable:t[10],value:t[4],show_legend:t[5],color_map:t[0]}}),e.$on("select",t[13]),{c(){O(e.$$.fragment)},m(l,s){V(e,l,s),n=!0},p(l,s){const o={};s&1024&&(o.selectable=l[10]),s&16&&(o.value=l[4]),s&32&&(o.show_legend=l[5]),s&1&&(o.color_map=l[0]),e.$set(o)},i(l){n||(H(e.$$.fragment,l),n=!0)},o(l){M(e.$$.fragment,l),n=!1},d(l){z(e,l)}}}function Ne(t){let e,n;return e=new 
ae({}),{c(){O(e.$$.fragment)},m(l,s){V(e,l,s),n=!0},i(l){n||(H(e.$$.fragment,l),n=!0)},o(l){M(e.$$.fragment,l),n=!1},d(l){z(e,l)}}}function Oe(t){let e,n,l,s,o,a,r;const i=[t[11]];let f={};for(let c=0;c{u=null}),U());let E=s;s=B(c),s===E?d[s].p(c,g):(Q(),M(d[E],1,1,()=>{d[E]=null}),U(),o=d[s],o?o.p(c,g):(o=d[s]=b[s](c),o.c()),H(o,1),o.m(a.parentNode,a))},i(c){r||(H(e.$$.fragment,c),H(u),H(o),r=!0)},o(c){M(e.$$.fragment,c),M(u),M(o),r=!1},d(c){c&&(v(n),v(l),v(a)),z(e,c),u&&u.d(c),d[s].d(c)}}}function Ve(t){let e,n;return e=new ve({props:{test_id:"highlighted-text",visible:t[3],elem_id:t[1],elem_classes:t[2],padding:!1,container:t[7],scale:t[8],min_width:t[9],$$slots:{default:[Oe]},$$scope:{ctx:t}}}),{c(){O(e.$$.fragment)},m(l,s){V(e,l,s),n=!0},p(l,[s]){const o={};s&8&&(o.visible=l[3]),s&2&&(o.elem_id=l[1]),s&4&&(o.elem_classes=l[2]),s&128&&(o.container=l[7]),s&256&&(o.scale=l[8]),s&512&&(o.min_width=l[9]),s&36081&&(o.$$scope={dirty:s,ctx:l}),e.$set(o)},i(l){n||(H(e.$$.fragment,l),n=!0)},o(l){M(e.$$.fragment,l),n=!1},d(l){z(e,l)}}}function ze(t,e,n){let{elem_id:l=""}=e,{elem_classes:s=[]}=e,{visible:o=!0}=e,{value:a}=e,r,{show_legend:i}=e,{color_map:f={}}=e,{label:u="Highlighted Text"}=e,{container:b=!0}=e,{scale:d=null}=e,{min_width:B=void 0}=e,{selectable:c=!1}=e,{loading_status:g}=e;const R=oe();function E(m){be.call(this,t,m)}return t.$$set=m=>{"elem_id"in m&&n(1,l=m.elem_id),"elem_classes"in m&&n(2,s=m.elem_classes),"visible"in m&&n(3,o=m.visible),"value"in m&&n(4,a=m.value),"show_legend"in m&&n(5,i=m.show_legend),"color_map"in m&&n(0,f=m.color_map),"label"in m&&n(6,u=m.label),"container"in m&&n(7,b=m.container),"scale"in m&&n(8,d=m.scale),"min_width"in m&&n(9,B=m.min_width),"selectable"in m&&n(10,c=m.selectable),"loading_status"in m&&n(11,g=m.loading_status)},t.$$.update=()=>{t.$$.dirty&1&&!f&&Object.keys(f).length&&n(0,f),t.$$.dirty&4112&&a!==r&&(n(12,r=a),R("change"))},[f,l,s,o,a,i,u,b,d,B,c,g,r,E]}class Re extends F{constructor(e){super(),G(this,e,ze,Ve,J,{elem_id:1,elem_classes:2,visible:3,value:4,show_legend:5,color_map:0,label:6,container:7,scale:8,min_width:9,selectable:10,loading_status:11})}}const De=Re,Ze=["static"],Fe=t=>({type:{payload:"Array<[string, string | number]>"},description:{payload:"list of text spans and corresponding label / value"}});export{De as Component,Fe as document,Ze as modes}; -//# sourceMappingURL=index-28bbfef4.js.map diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_models.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_models.py deleted file mode 100644 index e0e5278cc052e2f9a6d0af0a1cb2107b03de98f4..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_models.py +++ /dev/null @@ -1,1209 +0,0 @@ -import datetime -import email.message -import json as jsonlib -import typing -import urllib.request -from collections.abc import Mapping -from http.cookiejar import Cookie, CookieJar - -from ._content import ByteStream, UnattachedStream, encode_request, encode_response -from ._decoders import ( - SUPPORTED_DECODERS, - ByteChunker, - ContentDecoder, - IdentityDecoder, - LineDecoder, - MultiDecoder, - TextChunker, - TextDecoder, -) -from ._exceptions import ( - CookieConflict, - HTTPStatusError, - RequestNotRead, - ResponseNotRead, - StreamClosed, - StreamConsumed, - request_context, -) -from ._multipart import get_multipart_boundary_from_content_type -from ._status_codes import codes -from ._types import ( - AsyncByteStream, - CookieTypes, - HeaderTypes, 
- QueryParamTypes, - RequestContent, - RequestData, - RequestExtensions, - RequestFiles, - ResponseContent, - ResponseExtensions, - SyncByteStream, -) -from ._urls import URL -from ._utils import ( - guess_json_utf, - is_known_encoding, - normalize_header_key, - normalize_header_value, - obfuscate_sensitive_headers, - parse_content_type_charset, - parse_header_links, -) - - -class Headers(typing.MutableMapping[str, str]): - """ - HTTP headers, as a case-insensitive multi-dict. - """ - - def __init__( - self, - headers: typing.Optional[HeaderTypes] = None, - encoding: typing.Optional[str] = None, - ) -> None: - if headers is None: - self._list = [] # type: typing.List[typing.Tuple[bytes, bytes, bytes]] - elif isinstance(headers, Headers): - self._list = list(headers._list) - elif isinstance(headers, Mapping): - self._list = [ - ( - normalize_header_key(k, lower=False, encoding=encoding), - normalize_header_key(k, lower=True, encoding=encoding), - normalize_header_value(v, encoding), - ) - for k, v in headers.items() - ] - else: - self._list = [ - ( - normalize_header_key(k, lower=False, encoding=encoding), - normalize_header_key(k, lower=True, encoding=encoding), - normalize_header_value(v, encoding), - ) - for k, v in headers - ] - - self._encoding = encoding - - @property - def encoding(self) -> str: - """ - Header encoding is mandated as ascii, but we allow fallbacks to utf-8 - or iso-8859-1. - """ - if self._encoding is None: - for encoding in ["ascii", "utf-8"]: - for key, value in self.raw: - try: - key.decode(encoding) - value.decode(encoding) - except UnicodeDecodeError: - break - else: - # The else block runs if 'break' did not occur, meaning - # all values fitted the encoding. - self._encoding = encoding - break - else: - # The ISO-8859-1 encoding covers all 256 code points in a byte, - # so will never raise decode errors. - self._encoding = "iso-8859-1" - return self._encoding - - @encoding.setter - def encoding(self, value: str) -> None: - self._encoding = value - - @property - def raw(self) -> typing.List[typing.Tuple[bytes, bytes]]: - """ - Returns a list of the raw header items, as byte pairs. - """ - return [(raw_key, value) for raw_key, _, value in self._list] - - def keys(self) -> typing.KeysView[str]: - return {key.decode(self.encoding): None for _, key, value in self._list}.keys() - - def values(self) -> typing.ValuesView[str]: - values_dict: typing.Dict[str, str] = {} - for _, key, value in self._list: - str_key = key.decode(self.encoding) - str_value = value.decode(self.encoding) - if str_key in values_dict: - values_dict[str_key] += f", {str_value}" - else: - values_dict[str_key] = str_value - return values_dict.values() - - def items(self) -> typing.ItemsView[str, str]: - """ - Return `(key, value)` items of headers. Concatenate headers - into a single comma separated value when a key occurs multiple times. - """ - values_dict: typing.Dict[str, str] = {} - for _, key, value in self._list: - str_key = key.decode(self.encoding) - str_value = value.decode(self.encoding) - if str_key in values_dict: - values_dict[str_key] += f", {str_value}" - else: - values_dict[str_key] = str_value - return values_dict.items() - - def multi_items(self) -> typing.List[typing.Tuple[str, str]]: - """ - Return a list of `(key, value)` pairs of headers. Allow multiple - occurrences of the same key without concatenating into a single - comma separated value. 
- """ - return [ - (key.decode(self.encoding), value.decode(self.encoding)) - for _, key, value in self._list - ] - - def get(self, key: str, default: typing.Any = None) -> typing.Any: - """ - Return a header value. If multiple occurrences of the header occur - then concatenate them together with commas. - """ - try: - return self[key] - except KeyError: - return default - - def get_list(self, key: str, split_commas: bool = False) -> typing.List[str]: - """ - Return a list of all header values for a given key. - If `split_commas=True` is passed, then any comma separated header - values are split into multiple return strings. - """ - get_header_key = key.lower().encode(self.encoding) - - values = [ - item_value.decode(self.encoding) - for _, item_key, item_value in self._list - if item_key.lower() == get_header_key - ] - - if not split_commas: - return values - - split_values = [] - for value in values: - split_values.extend([item.strip() for item in value.split(",")]) - return split_values - - def update(self, headers: typing.Optional[HeaderTypes] = None) -> None: # type: ignore - headers = Headers(headers) - for key in headers.keys(): - if key in self: - self.pop(key) - self._list.extend(headers._list) - - def copy(self) -> "Headers": - return Headers(self, encoding=self.encoding) - - def __getitem__(self, key: str) -> str: - """ - Return a single header value. - - If there are multiple headers with the same key, then we concatenate - them with commas. See: https://tools.ietf.org/html/rfc7230#section-3.2.2 - """ - normalized_key = key.lower().encode(self.encoding) - - items = [ - header_value.decode(self.encoding) - for _, header_key, header_value in self._list - if header_key == normalized_key - ] - - if items: - return ", ".join(items) - - raise KeyError(key) - - def __setitem__(self, key: str, value: str) -> None: - """ - Set the header `key` to `value`, removing any duplicate entries. - Retains insertion order. - """ - set_key = key.encode(self._encoding or "utf-8") - set_value = value.encode(self._encoding or "utf-8") - lookup_key = set_key.lower() - - found_indexes = [ - idx - for idx, (_, item_key, _) in enumerate(self._list) - if item_key == lookup_key - ] - - for idx in reversed(found_indexes[1:]): - del self._list[idx] - - if found_indexes: - idx = found_indexes[0] - self._list[idx] = (set_key, lookup_key, set_value) - else: - self._list.append((set_key, lookup_key, set_value)) - - def __delitem__(self, key: str) -> None: - """ - Remove the header `key`. 
- """ - del_key = key.lower().encode(self.encoding) - - pop_indexes = [ - idx - for idx, (_, item_key, _) in enumerate(self._list) - if item_key.lower() == del_key - ] - - if not pop_indexes: - raise KeyError(key) - - for idx in reversed(pop_indexes): - del self._list[idx] - - def __contains__(self, key: typing.Any) -> bool: - header_key = key.lower().encode(self.encoding) - return header_key in [key for _, key, _ in self._list] - - def __iter__(self) -> typing.Iterator[typing.Any]: - return iter(self.keys()) - - def __len__(self) -> int: - return len(self._list) - - def __eq__(self, other: typing.Any) -> bool: - try: - other_headers = Headers(other) - except ValueError: - return False - - self_list = [(key, value) for _, key, value in self._list] - other_list = [(key, value) for _, key, value in other_headers._list] - return sorted(self_list) == sorted(other_list) - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - - encoding_str = "" - if self.encoding != "ascii": - encoding_str = f", encoding={self.encoding!r}" - - as_list = list(obfuscate_sensitive_headers(self.multi_items())) - as_dict = dict(as_list) - - no_duplicate_keys = len(as_dict) == len(as_list) - if no_duplicate_keys: - return f"{class_name}({as_dict!r}{encoding_str})" - return f"{class_name}({as_list!r}{encoding_str})" - - -class Request: - def __init__( - self, - method: typing.Union[str, bytes], - url: typing.Union["URL", str], - *, - params: typing.Optional[QueryParamTypes] = None, - headers: typing.Optional[HeaderTypes] = None, - cookies: typing.Optional[CookieTypes] = None, - content: typing.Optional[RequestContent] = None, - data: typing.Optional[RequestData] = None, - files: typing.Optional[RequestFiles] = None, - json: typing.Optional[typing.Any] = None, - stream: typing.Union[SyncByteStream, AsyncByteStream, None] = None, - extensions: typing.Optional[RequestExtensions] = None, - ): - self.method = ( - method.decode("ascii").upper() - if isinstance(method, bytes) - else method.upper() - ) - self.url = URL(url) - if params is not None: - self.url = self.url.copy_merge_params(params=params) - self.headers = Headers(headers) - self.extensions = {} if extensions is None else extensions - - if cookies: - Cookies(cookies).set_cookie_header(self) - - if stream is None: - content_type: typing.Optional[str] = self.headers.get("content-type") - headers, stream = encode_request( - content=content, - data=data, - files=files, - json=json, - boundary=get_multipart_boundary_from_content_type( - content_type=content_type.encode(self.headers.encoding) - if content_type - else None - ), - ) - self._prepare(headers) - self.stream = stream - # Load the request body, except for streaming content. - if isinstance(stream, ByteStream): - self.read() - else: - # There's an important distinction between `Request(content=...)`, - # and `Request(stream=...)`. - # - # Using `content=...` implies automatically populated `Host` and content - # headers, of either `Content-Length: ...` or `Transfer-Encoding: chunked`. - # - # Using `stream=...` will not automatically include *any* auto-populated headers. - # - # As an end-user you don't really need `stream=...`. It's only - # useful when: - # - # * Preserving the request stream when copying requests, eg for redirects. - # * Creating request instances on the *server-side* of the transport API. 
- self.stream = stream - - def _prepare(self, default_headers: typing.Dict[str, str]) -> None: - for key, value in default_headers.items(): - # Ignore Transfer-Encoding if the Content-Length has been set explicitly. - if key.lower() == "transfer-encoding" and "Content-Length" in self.headers: - continue - self.headers.setdefault(key, value) - - auto_headers: typing.List[typing.Tuple[bytes, bytes]] = [] - - has_host = "Host" in self.headers - has_content_length = ( - "Content-Length" in self.headers or "Transfer-Encoding" in self.headers - ) - - if not has_host and self.url.host: - auto_headers.append((b"Host", self.url.netloc)) - if not has_content_length and self.method in ("POST", "PUT", "PATCH"): - auto_headers.append((b"Content-Length", b"0")) - - self.headers = Headers(auto_headers + self.headers.raw) - - @property - def content(self) -> bytes: - if not hasattr(self, "_content"): - raise RequestNotRead() - return self._content - - def read(self) -> bytes: - """ - Read and return the request content. - """ - if not hasattr(self, "_content"): - assert isinstance(self.stream, typing.Iterable) - self._content = b"".join(self.stream) - if not isinstance(self.stream, ByteStream): - # If a streaming request has been read entirely into memory, then - # we can replace the stream with a raw bytes implementation, - # to ensure that any non-replayable streams can still be used. - self.stream = ByteStream(self._content) - return self._content - - async def aread(self) -> bytes: - """ - Read and return the request content. - """ - if not hasattr(self, "_content"): - assert isinstance(self.stream, typing.AsyncIterable) - self._content = b"".join([part async for part in self.stream]) - if not isinstance(self.stream, ByteStream): - # If a streaming request has been read entirely into memory, then - # we can replace the stream with a raw bytes implementation, - # to ensure that any non-replayable streams can still be used. - self.stream = ByteStream(self._content) - return self._content - - def __repr__(self) -> str: - class_name = self.__class__.__name__ - url = str(self.url) - return f"<{class_name}({self.method!r}, {url!r})>" - - def __getstate__(self) -> typing.Dict[str, typing.Any]: - return { - name: value - for name, value in self.__dict__.items() - if name not in ["extensions", "stream"] - } - - def __setstate__(self, state: typing.Dict[str, typing.Any]) -> None: - for name, value in state.items(): - setattr(self, name, value) - self.extensions = {} - self.stream = UnattachedStream() - - -class Response: - def __init__( - self, - status_code: int, - *, - headers: typing.Optional[HeaderTypes] = None, - content: typing.Optional[ResponseContent] = None, - text: typing.Optional[str] = None, - html: typing.Optional[str] = None, - json: typing.Any = None, - stream: typing.Union[SyncByteStream, AsyncByteStream, None] = None, - request: typing.Optional[Request] = None, - extensions: typing.Optional[ResponseExtensions] = None, - history: typing.Optional[typing.List["Response"]] = None, - default_encoding: typing.Union[str, typing.Callable[[bytes], str]] = "utf-8", - ): - self.status_code = status_code - self.headers = Headers(headers) - - self._request: typing.Optional[Request] = request - - # When follow_redirects=False and a redirect is received, - # the client will set `response.next_request`. 
- self.next_request: typing.Optional[Request] = None - - self.extensions = {} if extensions is None else extensions - self.history = [] if history is None else list(history) - - self.is_closed = False - self.is_stream_consumed = False - - self.default_encoding = default_encoding - - if stream is None: - headers, stream = encode_response(content, text, html, json) - self._prepare(headers) - self.stream = stream - if isinstance(stream, ByteStream): - # Load the response body, except for streaming content. - self.read() - else: - # There's an important distinction between `Response(content=...)`, - # and `Response(stream=...)`. - # - # Using `content=...` implies automatically populated content headers, - # of either `Content-Length: ...` or `Transfer-Encoding: chunked`. - # - # Using `stream=...` will not automatically include any content headers. - # - # As an end-user you don't really need `stream=...`. It's only - # useful when creating response instances having received a stream - # from the transport API. - self.stream = stream - - self._num_bytes_downloaded = 0 - - def _prepare(self, default_headers: typing.Dict[str, str]) -> None: - for key, value in default_headers.items(): - # Ignore Transfer-Encoding if the Content-Length has been set explicitly. - if key.lower() == "transfer-encoding" and "content-length" in self.headers: - continue - self.headers.setdefault(key, value) - - @property - def elapsed(self) -> datetime.timedelta: - """ - Returns the time taken for the complete request/response - cycle to complete. - """ - if not hasattr(self, "_elapsed"): - raise RuntimeError( - "'.elapsed' may only be accessed after the response " - "has been read or closed." - ) - return self._elapsed - - @elapsed.setter - def elapsed(self, elapsed: datetime.timedelta) -> None: - self._elapsed = elapsed - - @property - def request(self) -> Request: - """ - Returns the request instance associated to the current response. - """ - if self._request is None: - raise RuntimeError( - "The request instance has not been set on this response." - ) - return self._request - - @request.setter - def request(self, value: Request) -> None: - self._request = value - - @property - def http_version(self) -> str: - try: - http_version: bytes = self.extensions["http_version"] - except KeyError: - return "HTTP/1.1" - else: - return http_version.decode("ascii", errors="ignore") - - @property - def reason_phrase(self) -> str: - try: - reason_phrase: bytes = self.extensions["reason_phrase"] - except KeyError: - return codes.get_reason_phrase(self.status_code) - else: - return reason_phrase.decode("ascii", errors="ignore") - - @property - def url(self) -> URL: - """ - Returns the URL for which the request was made. - """ - return self.request.url - - @property - def content(self) -> bytes: - if not hasattr(self, "_content"): - raise ResponseNotRead() - return self._content - - @property - def text(self) -> str: - if not hasattr(self, "_text"): - content = self.content - if not content: - self._text = "" - else: - decoder = TextDecoder(encoding=self.encoding or "utf-8") - self._text = "".join([decoder.decode(self.content), decoder.flush()]) - return self._text - - @property - def encoding(self) -> typing.Optional[str]: - """ - Return an encoding to use for decoding the byte content into text. - The priority for determining this is given by... - - * `.encoding = <>` has been set explicitly. - * The encoding as specified by the charset parameter in the Content-Type header. 
- * The encoding as determined by `default_encoding`, which may either be - a string like "utf-8" indicating the encoding to use, or may be a callable - which enables charset autodetection. - """ - if not hasattr(self, "_encoding"): - encoding = self.charset_encoding - if encoding is None or not is_known_encoding(encoding): - if isinstance(self.default_encoding, str): - encoding = self.default_encoding - elif hasattr(self, "_content"): - encoding = self.default_encoding(self._content) - self._encoding = encoding or "utf-8" - return self._encoding - - @encoding.setter - def encoding(self, value: str) -> None: - self._encoding = value - - @property - def charset_encoding(self) -> typing.Optional[str]: - """ - Return the encoding, as specified by the Content-Type header. - """ - content_type = self.headers.get("Content-Type") - if content_type is None: - return None - - return parse_content_type_charset(content_type) - - def _get_content_decoder(self) -> ContentDecoder: - """ - Returns a decoder instance which can be used to decode the raw byte - content, depending on the Content-Encoding used in the response. - """ - if not hasattr(self, "_decoder"): - decoders: typing.List[ContentDecoder] = [] - values = self.headers.get_list("content-encoding", split_commas=True) - for value in values: - value = value.strip().lower() - try: - decoder_cls = SUPPORTED_DECODERS[value] - decoders.append(decoder_cls()) - except KeyError: - continue - - if len(decoders) == 1: - self._decoder = decoders[0] - elif len(decoders) > 1: - self._decoder = MultiDecoder(children=decoders) - else: - self._decoder = IdentityDecoder() - - return self._decoder - - @property - def is_informational(self) -> bool: - """ - A property which is `True` for 1xx status codes, `False` otherwise. - """ - return codes.is_informational(self.status_code) - - @property - def is_success(self) -> bool: - """ - A property which is `True` for 2xx status codes, `False` otherwise. - """ - return codes.is_success(self.status_code) - - @property - def is_redirect(self) -> bool: - """ - A property which is `True` for 3xx status codes, `False` otherwise. - - Note that not all responses with a 3xx status code indicate a URL redirect. - - Use `response.has_redirect_location` to determine responses with a properly - formed URL redirection. - """ - return codes.is_redirect(self.status_code) - - @property - def is_client_error(self) -> bool: - """ - A property which is `True` for 4xx status codes, `False` otherwise. - """ - return codes.is_client_error(self.status_code) - - @property - def is_server_error(self) -> bool: - """ - A property which is `True` for 5xx status codes, `False` otherwise. - """ - return codes.is_server_error(self.status_code) - - @property - def is_error(self) -> bool: - """ - A property which is `True` for 4xx and 5xx status codes, `False` otherwise. - """ - return codes.is_error(self.status_code) - - @property - def has_redirect_location(self) -> bool: - """ - Returns True for 3xx responses with a properly formed URL redirection, - `False` otherwise. - """ - return ( - self.status_code - in ( - # 301 (Cacheable redirect. Method may change to GET.) - codes.MOVED_PERMANENTLY, - # 302 (Uncacheable redirect. Method may change to GET.) - codes.FOUND, - # 303 (Client should make a GET or HEAD request.) - codes.SEE_OTHER, - # 307 (Equiv. 302, but retain method) - codes.TEMPORARY_REDIRECT, - # 308 (Equiv. 
301, but retain method)
-                codes.PERMANENT_REDIRECT,
-            )
-            and "Location" in self.headers
-        )
-
-    def raise_for_status(self) -> None:
-        """
-        Raise the `HTTPStatusError` if one occurred.
-        """
-        request = self._request
-        if request is None:
-            raise RuntimeError(
-                "Cannot call `raise_for_status` as the request "
-                "instance has not been set on this response."
-            )
-
-        if self.is_success:
-            return
-
-        if self.has_redirect_location:
-            message = (
-                "{error_type} '{0.status_code} {0.reason_phrase}' for url '{0.url}'\n"
-                "Redirect location: '{0.headers[location]}'\n"
-                "For more information check: https://httpstatuses.com/{0.status_code}"
-            )
-        else:
-            message = (
-                "{error_type} '{0.status_code} {0.reason_phrase}' for url '{0.url}'\n"
-                "For more information check: https://httpstatuses.com/{0.status_code}"
-            )
-
-        status_class = self.status_code // 100
-        error_types = {
-            1: "Informational response",
-            3: "Redirect response",
-            4: "Client error",
-            5: "Server error",
-        }
-        error_type = error_types.get(status_class, "Invalid status code")
-        message = message.format(self, error_type=error_type)
-        raise HTTPStatusError(message, request=request, response=self)
-
-    def json(self, **kwargs: typing.Any) -> typing.Any:
-        if self.charset_encoding is None and self.content and len(self.content) > 3:
-            encoding = guess_json_utf(self.content)
-            if encoding is not None:
-                return jsonlib.loads(self.content.decode(encoding), **kwargs)
-        return jsonlib.loads(self.text, **kwargs)
-
-    @property
-    def cookies(self) -> "Cookies":
-        if not hasattr(self, "_cookies"):
-            self._cookies = Cookies()
-            self._cookies.extract_cookies(self)
-        return self._cookies
-
-    @property
-    def links(self) -> typing.Dict[typing.Optional[str], typing.Dict[str, str]]:
-        """
-        Returns the parsed header links of the response, if any
-        """
-        header = self.headers.get("link")
-        ldict = {}
-        if header:
-            links = parse_header_links(header)
-            for link in links:
-                key = link.get("rel") or link.get("url")
-                ldict[key] = link
-        return ldict
-
-    @property
-    def num_bytes_downloaded(self) -> int:
-        return self._num_bytes_downloaded
-
-    def __repr__(self) -> str:
-        return f"<Response [{self.status_code} {self.reason_phrase}]>"
-
-    def __getstate__(self) -> typing.Dict[str, typing.Any]:
-        return {
-            name: value
-            for name, value in self.__dict__.items()
-            if name not in ["extensions", "stream", "is_closed", "_decoder"]
-        }
-
-    def __setstate__(self, state: typing.Dict[str, typing.Any]) -> None:
-        for name, value in state.items():
-            setattr(self, name, value)
-        self.is_closed = True
-        self.extensions = {}
-        self.stream = UnattachedStream()
-
-    def read(self) -> bytes:
-        """
-        Read and return the response content.
-        """
-        if not hasattr(self, "_content"):
-            self._content = b"".join(self.iter_bytes())
-        return self._content
-
-    def iter_bytes(
-        self, chunk_size: typing.Optional[int] = None
-    ) -> typing.Iterator[bytes]:
-        """
-        A byte-iterator over the decoded response content.
-        This allows us to handle gzip, deflate, and brotli encoded responses.
- """ - if hasattr(self, "_content"): - chunk_size = len(self._content) if chunk_size is None else chunk_size - for i in range(0, len(self._content), max(chunk_size, 1)): - yield self._content[i : i + chunk_size] - else: - decoder = self._get_content_decoder() - chunker = ByteChunker(chunk_size=chunk_size) - with request_context(request=self._request): - for raw_bytes in self.iter_raw(): - decoded = decoder.decode(raw_bytes) - for chunk in chunker.decode(decoded): - yield chunk - decoded = decoder.flush() - for chunk in chunker.decode(decoded): - yield chunk # pragma: no cover - for chunk in chunker.flush(): - yield chunk - - def iter_text( - self, chunk_size: typing.Optional[int] = None - ) -> typing.Iterator[str]: - """ - A str-iterator over the decoded response content - that handles both gzip, deflate, etc but also detects the content's - string encoding. - """ - decoder = TextDecoder(encoding=self.encoding or "utf-8") - chunker = TextChunker(chunk_size=chunk_size) - with request_context(request=self._request): - for byte_content in self.iter_bytes(): - text_content = decoder.decode(byte_content) - for chunk in chunker.decode(text_content): - yield chunk - text_content = decoder.flush() - for chunk in chunker.decode(text_content): - yield chunk - for chunk in chunker.flush(): - yield chunk - - def iter_lines(self) -> typing.Iterator[str]: - decoder = LineDecoder() - with request_context(request=self._request): - for text in self.iter_text(): - for line in decoder.decode(text): - yield line - for line in decoder.flush(): - yield line - - def iter_raw( - self, chunk_size: typing.Optional[int] = None - ) -> typing.Iterator[bytes]: - """ - A byte-iterator over the raw response content. - """ - if self.is_stream_consumed: - raise StreamConsumed() - if self.is_closed: - raise StreamClosed() - if not isinstance(self.stream, SyncByteStream): - raise RuntimeError("Attempted to call a sync iterator on an async stream.") - - self.is_stream_consumed = True - self._num_bytes_downloaded = 0 - chunker = ByteChunker(chunk_size=chunk_size) - - with request_context(request=self._request): - for raw_stream_bytes in self.stream: - self._num_bytes_downloaded += len(raw_stream_bytes) - for chunk in chunker.decode(raw_stream_bytes): - yield chunk - - for chunk in chunker.flush(): - yield chunk - - self.close() - - def close(self) -> None: - """ - Close the response and release the connection. - Automatically called if the response body is read to completion. - """ - if not isinstance(self.stream, SyncByteStream): - raise RuntimeError("Attempted to call an sync close on an async stream.") - - if not self.is_closed: - self.is_closed = True - with request_context(request=self._request): - self.stream.close() - - async def aread(self) -> bytes: - """ - Read and return the response content. - """ - if not hasattr(self, "_content"): - self._content = b"".join([part async for part in self.aiter_bytes()]) - return self._content - - async def aiter_bytes( - self, chunk_size: typing.Optional[int] = None - ) -> typing.AsyncIterator[bytes]: - """ - A byte-iterator over the decoded response content. - This allows us to handle gzip, deflate, and brotli encoded responses. 
- """ - if hasattr(self, "_content"): - chunk_size = len(self._content) if chunk_size is None else chunk_size - for i in range(0, len(self._content), max(chunk_size, 1)): - yield self._content[i : i + chunk_size] - else: - decoder = self._get_content_decoder() - chunker = ByteChunker(chunk_size=chunk_size) - with request_context(request=self._request): - async for raw_bytes in self.aiter_raw(): - decoded = decoder.decode(raw_bytes) - for chunk in chunker.decode(decoded): - yield chunk - decoded = decoder.flush() - for chunk in chunker.decode(decoded): - yield chunk # pragma: no cover - for chunk in chunker.flush(): - yield chunk - - async def aiter_text( - self, chunk_size: typing.Optional[int] = None - ) -> typing.AsyncIterator[str]: - """ - A str-iterator over the decoded response content - that handles both gzip, deflate, etc but also detects the content's - string encoding. - """ - decoder = TextDecoder(encoding=self.encoding or "utf-8") - chunker = TextChunker(chunk_size=chunk_size) - with request_context(request=self._request): - async for byte_content in self.aiter_bytes(): - text_content = decoder.decode(byte_content) - for chunk in chunker.decode(text_content): - yield chunk - text_content = decoder.flush() - for chunk in chunker.decode(text_content): - yield chunk - for chunk in chunker.flush(): - yield chunk - - async def aiter_lines(self) -> typing.AsyncIterator[str]: - decoder = LineDecoder() - with request_context(request=self._request): - async for text in self.aiter_text(): - for line in decoder.decode(text): - yield line - for line in decoder.flush(): - yield line - - async def aiter_raw( - self, chunk_size: typing.Optional[int] = None - ) -> typing.AsyncIterator[bytes]: - """ - A byte-iterator over the raw response content. - """ - if self.is_stream_consumed: - raise StreamConsumed() - if self.is_closed: - raise StreamClosed() - if not isinstance(self.stream, AsyncByteStream): - raise RuntimeError("Attempted to call an async iterator on an sync stream.") - - self.is_stream_consumed = True - self._num_bytes_downloaded = 0 - chunker = ByteChunker(chunk_size=chunk_size) - - with request_context(request=self._request): - async for raw_stream_bytes in self.stream: - self._num_bytes_downloaded += len(raw_stream_bytes) - for chunk in chunker.decode(raw_stream_bytes): - yield chunk - - for chunk in chunker.flush(): - yield chunk - - await self.aclose() - - async def aclose(self) -> None: - """ - Close the response and release the connection. - Automatically called if the response body is read to completion. - """ - if not isinstance(self.stream, AsyncByteStream): - raise RuntimeError("Attempted to call an async close on an sync stream.") - - if not self.is_closed: - self.is_closed = True - with request_context(request=self._request): - await self.stream.aclose() - - -class Cookies(typing.MutableMapping[str, str]): - """ - HTTP Cookies, as a mutable mapping. 
- """ - - def __init__(self, cookies: typing.Optional[CookieTypes] = None) -> None: - if cookies is None or isinstance(cookies, dict): - self.jar = CookieJar() - if isinstance(cookies, dict): - for key, value in cookies.items(): - self.set(key, value) - elif isinstance(cookies, list): - self.jar = CookieJar() - for key, value in cookies: - self.set(key, value) - elif isinstance(cookies, Cookies): - self.jar = CookieJar() - for cookie in cookies.jar: - self.jar.set_cookie(cookie) - else: - self.jar = cookies - - def extract_cookies(self, response: Response) -> None: - """ - Loads any cookies based on the response `Set-Cookie` headers. - """ - urllib_response = self._CookieCompatResponse(response) - urllib_request = self._CookieCompatRequest(response.request) - - self.jar.extract_cookies(urllib_response, urllib_request) # type: ignore - - def set_cookie_header(self, request: Request) -> None: - """ - Sets an appropriate 'Cookie:' HTTP header on the `Request`. - """ - urllib_request = self._CookieCompatRequest(request) - self.jar.add_cookie_header(urllib_request) - - def set(self, name: str, value: str, domain: str = "", path: str = "/") -> None: - """ - Set a cookie value by name. May optionally include domain and path. - """ - kwargs = { - "version": 0, - "name": name, - "value": value, - "port": None, - "port_specified": False, - "domain": domain, - "domain_specified": bool(domain), - "domain_initial_dot": domain.startswith("."), - "path": path, - "path_specified": bool(path), - "secure": False, - "expires": None, - "discard": True, - "comment": None, - "comment_url": None, - "rest": {"HttpOnly": None}, - "rfc2109": False, - } - cookie = Cookie(**kwargs) # type: ignore - self.jar.set_cookie(cookie) - - def get( # type: ignore - self, - name: str, - default: typing.Optional[str] = None, - domain: typing.Optional[str] = None, - path: typing.Optional[str] = None, - ) -> typing.Optional[str]: - """ - Get a cookie by name. May optionally include domain and path - in order to specify exactly which cookie to retrieve. - """ - value = None - for cookie in self.jar: - if cookie.name == name: - if domain is None or cookie.domain == domain: - if path is None or cookie.path == path: - if value is not None: - message = f"Multiple cookies exist with name={name}" - raise CookieConflict(message) - value = cookie.value - - if value is None: - return default - return value - - def delete( - self, - name: str, - domain: typing.Optional[str] = None, - path: typing.Optional[str] = None, - ) -> None: - """ - Delete a cookie by name. May optionally include domain and path - in order to specify exactly which cookie to delete. - """ - if domain is not None and path is not None: - return self.jar.clear(domain, path, name) - - remove = [ - cookie - for cookie in self.jar - if cookie.name == name - and (domain is None or cookie.domain == domain) - and (path is None or cookie.path == path) - ] - - for cookie in remove: - self.jar.clear(cookie.domain, cookie.path, cookie.name) - - def clear( - self, domain: typing.Optional[str] = None, path: typing.Optional[str] = None - ) -> None: - """ - Delete all cookies. Optionally include a domain and path in - order to only delete a subset of all the cookies. 
- """ - args = [] - if domain is not None: - args.append(domain) - if path is not None: - assert domain is not None - args.append(path) - self.jar.clear(*args) - - def update(self, cookies: typing.Optional[CookieTypes] = None) -> None: # type: ignore - cookies = Cookies(cookies) - for cookie in cookies.jar: - self.jar.set_cookie(cookie) - - def __setitem__(self, name: str, value: str) -> None: - return self.set(name, value) - - def __getitem__(self, name: str) -> str: - value = self.get(name) - if value is None: - raise KeyError(name) - return value - - def __delitem__(self, name: str) -> None: - return self.delete(name) - - def __len__(self) -> int: - return len(self.jar) - - def __iter__(self) -> typing.Iterator[str]: - return (cookie.name for cookie in self.jar) - - def __bool__(self) -> bool: - for _ in self.jar: - return True - return False - - def __repr__(self) -> str: - cookies_repr = ", ".join( - [ - f"" - for cookie in self.jar - ] - ) - - return f"" - - class _CookieCompatRequest(urllib.request.Request): - """ - Wraps a `Request` instance up in a compatibility interface suitable - for use with `CookieJar` operations. - """ - - def __init__(self, request: Request) -> None: - super().__init__( - url=str(request.url), - headers=dict(request.headers), - method=request.method, - ) - self.request = request - - def add_unredirected_header(self, key: str, value: str) -> None: - super().add_unredirected_header(key, value) - self.request.headers[key] = value - - class _CookieCompatResponse: - """ - Wraps a `Request` instance up in a compatibility interface suitable - for use with `CookieJar` operations. - """ - - def __init__(self, response: Response): - self.response = response - - def info(self) -> email.message.Message: - info = email.message.Message() - for key, value in self.response.headers.multi_items(): - # Note that setting `info[key]` here is an "append" operation, - # not a "replace" operation. 
- # https://docs.python.org/3/library/email.compat32-message.html#email.message.Message.__setitem__ - info[key] = value - return info diff --git a/spaces/Detomo/Car_part_classification/app.py b/spaces/Detomo/Car_part_classification/app.py deleted file mode 100644 index c07e187a03368f0542fc5ac618c2ca45e3f73024..0000000000000000000000000000000000000000 --- a/spaces/Detomo/Car_part_classification/app.py +++ /dev/null @@ -1,84 +0,0 @@ -# import shutil -# import cv2 -# from PIL import Image -# from collections import deque, Counter - -import time, os, json, onnx, onnxruntime -# import torch -import pandas as pd -import streamlit as st -import requests -from utils import * -import args -from streamlit_lottie import st_lottie - -st.set_page_config( - page_title=args.PAGE_TITLE, - page_icon=args.PAGE_ICON, layout=args.LAYOUT, initial_sidebar_state='auto' -) - - -def load_lottieurl(url: str): - r = requests.get(url) - if r.status_code != 200: - return None - return r.json() - - -# Configure -options = onnxruntime.SessionOptions() -options.intra_op_num_threads = 8 -options.inter_op_num_threads = 8 - -lottie_penguin = load_lottieurl('https://assets10.lottiefiles.com/datafiles/Yv8B88Go8kHRZ5T/data.json') -st_lottie(lottie_penguin, height=200) - -hide_streamlit_style = """ - <style> - #MainMenu {visibility: hidden;} - footer {visibility: hidden;} - </style> - """ -st.markdown(hide_streamlit_style, unsafe_allow_html=True) - -st.write(args.LANDINGPAGE_TITLE) -st.sidebar.title(args.SIDEBAR_TITLE) -method = st.sidebar.radio('Choose input source 👇', options=['Image']) - - -# Load model -@st.cache(suppress_st_warning=False) -def initial_setup(): - df_train = pd.read_csv('full_set.csv') - sub_test_list = sorted(list(df_train['Image'].map(lambda x: get_image(x)))) - with open('embeddings.npy', 'rb') as f: - embeddings = np.load(f) - PATH = 'model_onnx.onnx' - ort_session = onnxruntime.InferenceSession(PATH, sess_options=options) - input_name = ort_session.get_inputs()[0].name - return df_train, sub_test_list, embeddings, ort_session, input_name - - -df_train, sub_test_list, embeddings, ort_session, input_name = initial_setup() - -if method == 'Image': - st.sidebar.markdown('---') - st.sidebar.header('Options') - content_file, col2 = show_original() - image_input( - content_file, df_train, sub_test_list, embeddings, ort_session, input_name, col2 - ) -else: - webcam_input( - df_train, sub_test_list, embeddings, ort_session, input_name - ) diff --git a/spaces/Detomo/naomi-app-api/Dockerfile b/spaces/Detomo/naomi-app-api/Dockerfile deleted file mode 100644 index 2918192a683f49bf99326376c4b72e3e6870cb49..0000000000000000000000000000000000000000 --- a/spaces/Detomo/naomi-app-api/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM python:3.9 - -WORKDIR /content - -RUN mkdir /content/cache/ - -ENV TRANSFORMERS_CACHE=/content/cache/ - -COPY ./requirements.txt /content/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /content/requirements.txt -RUN apt-get update && apt-get install -y ffmpeg - -COPY . . 
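-# The steps below create a non-root admin user with passwordless sudo and make -# /content writable by it; Hugging Face Spaces typically run the container as a -# non-root user, so the app directory must be writable at runtime.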
- -RUN adduser --disabled-password --gecos '' admin -RUN adduser admin sudo -RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers - -RUN chown -R admin:admin /content -RUN chmod -R 777 /content -USER admin - -EXPOSE 7860 - -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/DkLead/facebook-tts_transformer-ru-cv7_css10/README.md b/spaces/DkLead/facebook-tts_transformer-ru-cv7_css10/README.md deleted file mode 100644 index 355c75d1ab4b3a96218371ff1ce3680bacfc8ae5..0000000000000000000000000000000000000000 --- a/spaces/DkLead/facebook-tts_transformer-ru-cv7_css10/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Facebook-tts Transformer-ru-cv7 Css10 -emoji: 📊 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DragGan/DragGan/stylegan_human/legacy.py b/spaces/DragGan/DragGan/stylegan_human/legacy.py deleted file mode 100644 index ef0e838df5426e5f01ba3b917244de553678ed0f..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/legacy.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. -# -import pickle -import dnnlib -import re -from typing import List, Optional -import torch -import copy -import numpy as np -from torch_utils import misc - - -#---------------------------------------------------------------------------- -## loading torch pkl -def load_network_pkl(f, force_fp16=False, G_only=False): - data = _LegacyUnpickler(f).load() - if G_only: - f = open('ori_model_Gonly.txt','a+') - else: f = open('ori_model.txt','a+') - for key in data.keys(): - f.write(str(data[key])) - f.close() - - ## We comment out this part, if you want to convert TF pickle, you can use the original script from StyleGAN2-ada-pytorch - # # Legacy TensorFlow pickle => convert. - # if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data): - # tf_G, tf_D, tf_Gs = data - # G = convert_tf_generator(tf_G) - # D = convert_tf_discriminator(tf_D) - # G_ema = convert_tf_generator(tf_Gs) - # data = dict(G=G, D=D, G_ema=G_ema) - - # Add missing fields. - if 'training_set_kwargs' not in data: - data['training_set_kwargs'] = None - if 'augment_pipe' not in data: - data['augment_pipe'] = None - - # Validate contents. - assert isinstance(data['G_ema'], torch.nn.Module) - if not G_only: - assert isinstance(data['D'], torch.nn.Module) - assert isinstance(data['G'], torch.nn.Module) - assert isinstance(data['training_set_kwargs'], (dict, type(None))) - assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None))) - - # Force FP16. 
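- # The conversion below rebuilds each network with FP16-friendly init kwargs - # (num_fp16_res=4, conv_clamp=256) and copies the old parameters into the new - # instance, switching the checkpoint to mixed-precision inference without retraining.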
- if force_fp16: - if G_only: - convert_list = ['G_ema'] #'G' - else: convert_list = ['G', 'D', 'G_ema'] - for key in convert_list: - old = data[key] - kwargs = copy.deepcopy(old.init_kwargs) - if key.startswith('G'): - kwargs.synthesis_kwargs = dnnlib.EasyDict(kwargs.get('synthesis_kwargs', {})) - kwargs.synthesis_kwargs.num_fp16_res = 4 - kwargs.synthesis_kwargs.conv_clamp = 256 - if key.startswith('D'): - kwargs.num_fp16_res = 4 - kwargs.conv_clamp = 256 - if kwargs != old.init_kwargs: - new = type(old)(**kwargs).eval().requires_grad_(False) - misc.copy_params_and_buffers(old, new, require_all=True) - data[key] = new - return data - -class _TFNetworkStub(dnnlib.EasyDict): - pass - -class _LegacyUnpickler(pickle.Unpickler): - def find_class(self, module, name): - if module == 'dnnlib.tflib.network' and name == 'Network': - return _TFNetworkStub - return super().find_class(module, name) - -#---------------------------------------------------------------------------- - -def num_range(s: str) -> List[int]: - '''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.''' - - range_re = re.compile(r'^(\d+)-(\d+)$') - m = range_re.match(s) - if m: - return list(range(int(m.group(1)), int(m.group(2))+1)) - vals = s.split(',') - return [int(x) for x in vals] - - - -#---------------------------------------------------------------------------- -#### loading tf pkl -def load_pkl(file_or_url): - with open(file_or_url, 'rb') as file: - return pickle.load(file, encoding='latin1') - -#---------------------------------------------------------------------------- - -### For editing -def visual(output, out_path): - import torch - import cv2 - import numpy as np - output = (output + 1)/2 - output = torch.clamp(output, 0, 1) - if output.shape[1] == 1: - output = torch.cat([output, output, output], 1) - output = output[0].detach().cpu().permute(1,2,0).numpy() - output = (output*255).astype(np.uint8) - output = output[:,:,::-1] - cv2.imwrite(out_path, output) - -def save_obj(obj, path): - with open(path, 'wb+') as f: - pickle.dump(obj, f, protocol=4) - -#---------------------------------------------------------------------------- - -## Converting pkl to pth, change dict info inside pickle - -def convert_to_rgb(state_ros, state_nv, ros_name, nv_name): - state_ros[f"{ros_name}.conv.weight"] = state_nv[f"{nv_name}.torgb.weight"].unsqueeze(0) - state_ros[f"{ros_name}.bias"] = state_nv[f"{nv_name}.torgb.bias"].unsqueeze(0).unsqueeze(-1).unsqueeze(-1) - state_ros[f"{ros_name}.conv.modulation.weight"] = state_nv[f"{nv_name}.torgb.affine.weight"] - state_ros[f"{ros_name}.conv.modulation.bias"] = state_nv[f"{nv_name}.torgb.affine.bias"] - - -def convert_conv(state_ros, state_nv, ros_name, nv_name): - state_ros[f"{ros_name}.conv.weight"] = state_nv[f"{nv_name}.weight"].unsqueeze(0) - state_ros[f"{ros_name}.activate.bias"] = state_nv[f"{nv_name}.bias"] - state_ros[f"{ros_name}.conv.modulation.weight"] = state_nv[f"{nv_name}.affine.weight"] - state_ros[f"{ros_name}.conv.modulation.bias"] = state_nv[f"{nv_name}.affine.bias"] - state_ros[f"{ros_name}.noise.weight"] = state_nv[f"{nv_name}.noise_strength"].unsqueeze(0) - - -def convert_blur_kernel(state_ros, state_nv, level): - """Not quite sure why there is a factor of 4 here""" - # They are all the same - state_ros[f"convs.{2*level}.conv.blur.kernel"] = 4*state_nv["synthesis.b4.resample_filter"] - state_ros[f"to_rgbs.{level}.upsample.kernel"] = 4*state_nv["synthesis.b4.resample_filter"] - - -def determine_config(state_nv): - 
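# Infer the architecture from checkpoint key names: the highest mapping.fc{i} index - # gives the mapping depth, and the largest synthesis.b{res} resolution gives - # log2(res / 2) synthesis levels. -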
mapping_names = [name for name in state_nv.keys() if "mapping.fc" in name] - synthesis_names = [name for name in state_nv.keys() if "synthesis.b" in name] - - n_mapping = max([int(re.findall("(\d+)", n)[0]) for n in mapping_names]) + 1 - resolution = max([int(re.findall("(\d+)", n)[0]) for n in synthesis_names]) - n_layers = np.log(resolution/2)/np.log(2) - - return n_mapping, n_layers - - -def convert(network_pkl, output_file, G_only=False): - with dnnlib.util.open_url(network_pkl) as f: - G_nvidia = load_network_pkl(f,G_only=G_only)['G_ema'] - - state_nv = G_nvidia.state_dict() - n_mapping, n_layers = determine_config(state_nv) - - state_ros = {} - - for i in range(n_mapping): - state_ros[f"style.{i+1}.weight"] = state_nv[f"mapping.fc{i}.weight"] - state_ros[f"style.{i+1}.bias"] = state_nv[f"mapping.fc{i}.bias"] - - for i in range(int(n_layers)): - if i > 0: - for conv_level in range(2): - convert_conv(state_ros, state_nv, f"convs.{2*i-2+conv_level}", f"synthesis.b{4*(2**i)}.conv{conv_level}") - state_ros[f"noises.noise_{2*i-1+conv_level}"] = state_nv[f"synthesis.b{4*(2**i)}.conv{conv_level}.noise_const"].unsqueeze(0).unsqueeze(0) - - convert_to_rgb(state_ros, state_nv, f"to_rgbs.{i-1}", f"synthesis.b{4*(2**i)}") - convert_blur_kernel(state_ros, state_nv, i-1) - - else: - state_ros[f"input.input"] = state_nv[f"synthesis.b{4*(2**i)}.const"].unsqueeze(0) - convert_conv(state_ros, state_nv, "conv1", f"synthesis.b{4*(2**i)}.conv1") - state_ros[f"noises.noise_{2*i}"] = state_nv[f"synthesis.b{4*(2**i)}.conv1.noise_const"].unsqueeze(0).unsqueeze(0) - convert_to_rgb(state_ros, state_nv, "to_rgb1", f"synthesis.b{4*(2**i)}") - - # https://github.com/yuval-alaluf/restyle-encoder/issues/1#issuecomment-828354736 - latent_avg = state_nv['mapping.w_avg'] - state_dict = {"g_ema": state_ros, "latent_avg": latent_avg} - # if G_only: - # f = open('converted_model_Gonly.txt','a+') - # else: - # f = open('converted_model.txt','a+') - # for key in state_dict['g_ema'].keys(): - # f.write(str(key)+': '+str(state_dict['g_ema'][key].shape)+'\n') - # f.close() - torch.save(state_dict, output_file) - diff --git a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/src/utils.cpp b/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/src/utils.cpp deleted file mode 100644 index 4aa0305cd6cf025496528ef9ff49075209fe9e8c..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/deploy/TensorRT/cpp/src/utils.cpp +++ /dev/null @@ -1,429 +0,0 @@ -#include "BYTETracker.h" -#include "lapjv.h" - -vector<STrack*> BYTETracker::joint_stracks(vector<STrack*> &tlista, vector<STrack> &tlistb) -{ - map<int, int> exists; - vector<STrack*> res; - for (int i = 0; i < tlista.size(); i++) - { - exists.insert(pair<int, int>(tlista[i]->track_id, 1)); - res.push_back(tlista[i]); - } - for (int i = 0; i < tlistb.size(); i++) - { - int tid = tlistb[i].track_id; - if (!exists[tid] || exists.count(tid) == 0) - { - exists[tid] = 1; - res.push_back(&tlistb[i]); - } - } - return res; -} - -vector<STrack> BYTETracker::joint_stracks(vector<STrack> &tlista, vector<STrack> &tlistb) -{ - map<int, int> exists; - vector<STrack> res; - for (int i = 0; i < tlista.size(); i++) - { - exists.insert(pair<int, int>(tlista[i].track_id, 1)); - res.push_back(tlista[i]); - } - for (int i = 0; i < tlistb.size(); i++) - { - int tid = tlistb[i].track_id; - if (!exists[tid] || exists.count(tid) == 0) - { - exists[tid] = 1; - res.push_back(tlistb[i]); - } - } - return res; -} - -vector<STrack> BYTETracker::sub_stracks(vector<STrack> &tlista, vector<STrack> &tlistb) -{ - map<int, STrack> stracks; - for (int i = 0; i < tlista.size(); i++) - { - stracks.insert(pair<int, STrack>(tlista[i].track_id, tlista[i])); - } - for (int i = 0; i < tlistb.size(); i++) - { - int tid = tlistb[i].track_id; - if (stracks.count(tid) != 0) - { - stracks.erase(tid); - } - } - - vector<STrack> res; - std::map<int, STrack>::iterator it; - for (it = stracks.begin(); it != stracks.end(); ++it) - { - res.push_back(it->second); - } - - return res; -} - -void BYTETracker::remove_duplicate_stracks(vector<STrack> &resa, vector<STrack> &resb, vector<STrack> &stracksa, vector<STrack> &stracksb) -{ - vector<vector<float> > pdist = iou_distance(stracksa, stracksb); - vector<pair<int, int> > pairs; - for (int i = 0; i < pdist.size(); i++) - { - for (int j = 0; j < pdist[i].size(); j++) - { - if (pdist[i][j] < 0.15) - { - pairs.push_back(pair<int, int>(i, j)); - } - } - } - - vector<int> dupa, dupb; - for (int i = 0; i < pairs.size(); i++) - { - int timep = stracksa[pairs[i].first].frame_id - stracksa[pairs[i].first].start_frame; - int timeq = stracksb[pairs[i].second].frame_id - stracksb[pairs[i].second].start_frame; - if (timep > timeq) - dupb.push_back(pairs[i].second); - else - dupa.push_back(pairs[i].first); - } - - for (int i = 0; i < stracksa.size(); i++) - { - vector<int>::iterator iter = find(dupa.begin(), dupa.end(), i); - if (iter == dupa.end()) - { - resa.push_back(stracksa[i]); - } - } - - for (int i = 0; i < stracksb.size(); i++) - { - vector<int>::iterator iter = find(dupb.begin(), dupb.end(), i); - if (iter == dupb.end()) - { - resb.push_back(stracksb[i]); - } - } -} - -void BYTETracker::linear_assignment(vector<vector<float> > &cost_matrix, int cost_matrix_size, int cost_matrix_size_size, float thresh, - vector<vector<int> > &matches, vector<int> &unmatched_a, vector<int> &unmatched_b) -{ - if (cost_matrix.size() == 0) - { - for (int i = 0; i < cost_matrix_size; i++) - { - unmatched_a.push_back(i); - } - for (int i = 0; i < cost_matrix_size_size; i++) - { - unmatched_b.push_back(i); - } - return; - } - - vector<int> rowsol; vector<int> colsol; - float c = lapjv(cost_matrix, rowsol, colsol, true, thresh); - for (int i = 0; i < rowsol.size(); i++) - { - if (rowsol[i] >= 0) - { - vector<int> match; - match.push_back(i); - match.push_back(rowsol[i]); - matches.push_back(match); - } - else - { - unmatched_a.push_back(i); - } - } - - for (int i = 0; i < colsol.size(); i++) - { - if (colsol[i] < 0) - { - unmatched_b.push_back(i); - } - } -} - -vector<vector<float> > BYTETracker::ious(vector<vector<float> > &atlbrs, vector<vector<float> > &btlbrs) -{ - vector<vector<float> > ious; - if (atlbrs.size()*btlbrs.size() == 0) - return ious; - - ious.resize(atlbrs.size()); - for (int i = 0; i < ious.size(); i++) - { - ious[i].resize(btlbrs.size()); - } - - //bbox_ious - for (int k = 0; k < btlbrs.size(); k++) - { - vector<float> ious_tmp; - float box_area = (btlbrs[k][2] - btlbrs[k][0] + 1)*(btlbrs[k][3] - btlbrs[k][1] + 1); - for (int n = 0; n < atlbrs.size(); n++) - { - float iw = min(atlbrs[n][2], btlbrs[k][2]) - max(atlbrs[n][0], btlbrs[k][0]) + 1; - if (iw > 0) - { - float ih = min(atlbrs[n][3], btlbrs[k][3]) - max(atlbrs[n][1], btlbrs[k][1]) + 1; - if(ih > 0) - { - float ua = (atlbrs[n][2] - atlbrs[n][0] + 1)*(atlbrs[n][3] - atlbrs[n][1] + 1) + box_area - iw * ih; - ious[n][k] = iw * ih / ua; - } - else - { - ious[n][k] = 0.0; - } - } - else - { - ious[n][k] = 0.0; - } - } - } - - return ious; -} - -vector<vector<float> > BYTETracker::iou_distance(vector<STrack*> &atracks, vector<STrack> &btracks, int &dist_size, int &dist_size_size) -{ - vector<vector<float> > cost_matrix; - if (atracks.size() * btracks.size() == 0) - { - dist_size = atracks.size(); - dist_size_size = btracks.size(); - return cost_matrix; - } - vector<vector<float> > atlbrs, btlbrs; - for (int i = 0; i < atracks.size(); i++) - { - atlbrs.push_back(atracks[i]->tlbr); - } - for (int i = 0; i < btracks.size(); i++) - { - btlbrs.push_back(btracks[i].tlbr); - } - - dist_size = atracks.size(); - dist_size_size = btracks.size(); - - vector<vector<float> > _ious = ious(atlbrs, btlbrs); - - for (int i = 0; i < _ious.size();i++) - { - vector<float> _iou; - for (int j = 0; j < _ious[i].size(); j++) - { - _iou.push_back(1 - _ious[i][j]); - } - cost_matrix.push_back(_iou); - } - - return cost_matrix; -} - -vector<vector<float> > BYTETracker::iou_distance(vector<STrack> &atracks, vector<STrack> &btracks) -{ - vector<vector<float> > atlbrs, btlbrs; - for (int i = 0; i < atracks.size(); i++) - { - atlbrs.push_back(atracks[i].tlbr); - } - for (int i = 0; i < btracks.size(); i++) - { - btlbrs.push_back(btracks[i].tlbr); - } - - vector<vector<float> > _ious = ious(atlbrs, btlbrs); - vector<vector<float> > cost_matrix; - for (int i = 0; i < _ious.size(); i++) - { - vector<float> _iou; - for (int j = 0; j < _ious[i].size(); j++) - { - _iou.push_back(1 - _ious[i][j]); - } - cost_matrix.push_back(_iou); - } - - return cost_matrix; -} - -double BYTETracker::lapjv(const vector<vector<float> > &cost, vector<int> &rowsol, vector<int> &colsol, - bool extend_cost, float cost_limit, bool return_cost) -{ - vector<vector<float> > cost_c; - cost_c.assign(cost.begin(), cost.end()); - - vector<vector<float> > cost_c_extended; - - int n_rows = cost.size(); - int n_cols = cost[0].size(); - rowsol.resize(n_rows); - colsol.resize(n_cols); - - int n = 0; - if (n_rows == n_cols) - { - n = n_rows; - } - else - { - if (!extend_cost) - { - cout << "set extend_cost=True" << endl; - system("pause"); - exit(0); - } - } - - if (extend_cost || cost_limit < LONG_MAX) - { - n = n_rows + n_cols; - cost_c_extended.resize(n); - for (int i = 0; i < cost_c_extended.size(); i++) - cost_c_extended[i].resize(n); - - if (cost_limit < LONG_MAX) - { - for (int i = 0; i < cost_c_extended.size(); i++) - { - for (int j = 0; j < cost_c_extended[i].size(); j++) - { - cost_c_extended[i][j] = cost_limit / 2.0; - } - } - } - else - { - float cost_max = -1; - for (int i = 0; i < cost_c.size(); i++) - { - for (int j = 0; j < cost_c[i].size(); j++) - { - if (cost_c[i][j] > cost_max) - cost_max = cost_c[i][j]; - } - } - for (int i = 0; i < cost_c_extended.size(); i++) - { - for (int j = 0; j < cost_c_extended[i].size(); j++) - { - cost_c_extended[i][j] = cost_max + 1; - } - } - } - - for (int i = n_rows; i < cost_c_extended.size(); i++) - { - for (int j = n_cols; j < cost_c_extended[i].size(); j++) - { - cost_c_extended[i][j] = 0; - } - } - for (int i = 0; i < n_rows; i++) - { - for (int j = 0; j < n_cols; j++) - { - cost_c_extended[i][j] = cost_c[i][j]; - } - } - - cost_c.clear(); - cost_c.assign(cost_c_extended.begin(), cost_c_extended.end()); - } - - double **cost_ptr; - cost_ptr = new double *[sizeof(double *) * n]; - for (int i = 0; i < n; i++) - cost_ptr[i] = new double[sizeof(double) * n]; - - for (int i = 0; i < n; i++) - { - for (int j = 0; j < n; j++) - { - cost_ptr[i][j] = cost_c[i][j]; - } - } - - int* x_c = new int[sizeof(int) * n]; - int *y_c = new int[sizeof(int) * n]; - - int ret = lapjv_internal(n, cost_ptr, x_c, y_c); - if (ret != 0) - { - cout << "Calculate Wrong!"
<< endl; - system("pause"); - exit(0); - } - - double opt = 0.0; - - if (n != n_rows) - { - for (int i = 0; i < n; i++) - { - if (x_c[i] >= n_cols) - x_c[i] = -1; - if (y_c[i] >= n_rows) - y_c[i] = -1; - } - for (int i = 0; i < n_rows; i++) - { - rowsol[i] = x_c[i]; - } - for (int i = 0; i < n_cols; i++) - { - colsol[i] = y_c[i]; - } - - if (return_cost) - { - for (int i = 0; i < rowsol.size(); i++) - { - if (rowsol[i] != -1) - { - //cout << i << "\t" << rowsol[i] << "\t" << cost_ptr[i][rowsol[i]] << endl; - opt += cost_ptr[i][rowsol[i]]; - } - } - } - } - else if (return_cost) - { - for (int i = 0; i < rowsol.size(); i++) - { - opt += cost_ptr[i][rowsol[i]]; - } - } - - for (int i = 0; i < n; i++) - { - delete[]cost_ptr[i]; - } - delete[]cost_ptr; - delete[]x_c; - delete[]y_c; - - return opt; -} - -Scalar BYTETracker::get_color(int idx) -{ - idx += 3; - return Scalar(37 * idx % 255, 17 * idx % 255, 29 * idx % 255); -} \ No newline at end of file diff --git a/spaces/EuroPython2022/ToxicCommentClassification/README.md b/spaces/EuroPython2022/ToxicCommentClassification/README.md deleted file mode 100644 index 181ba45cc244f045f34d78ff3f11f9fbd871e3e0..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/ToxicCommentClassification/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ToxicCommentClassification -emoji: 📊 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.0.25 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/EuroPython2022/mmocr-demo/configs/kie/sdmgr/sdmgr_novisual_60e_wildreceipt.py b/spaces/EuroPython2022/mmocr-demo/configs/kie/sdmgr/sdmgr_novisual_60e_wildreceipt.py deleted file mode 100644 index 220135a0b037909599fbaf77c75b06f48f8b1ba7..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/kie/sdmgr/sdmgr_novisual_60e_wildreceipt.py +++ /dev/null @@ -1,98 +0,0 @@ -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -max_scale, min_scale = 1024, 512 - -train_pipeline = [ - dict(type='LoadAnnotations'), - dict( - type='ResizeNoImg', img_scale=(max_scale, min_scale), keep_ratio=True), - dict(type='KIEFormatBundle'), - dict( - type='Collect', - keys=['img', 'relations', 'texts', 'gt_bboxes', 'gt_labels'], - meta_keys=('filename', 'ori_texts')) -] -test_pipeline = [ - dict(type='LoadAnnotations'), - dict( - type='ResizeNoImg', img_scale=(max_scale, min_scale), keep_ratio=True), - dict(type='KIEFormatBundle'), - dict( - type='Collect', - keys=['img', 'relations', 'texts', 'gt_bboxes'], - meta_keys=('filename', 'ori_texts', 'img_norm_cfg', 'ori_filename', - 'img_shape')) -] - -dataset_type = 'KIEDataset' -data_root = 'data/wildreceipt' - -loader = dict( - type='HardDiskLoader', - repeat=1, - parser=dict( - type='LineJsonParser', - keys=['file_name', 'height', 'width', 'annotations'])) - -train = dict( - type=dataset_type, - ann_file=f'{data_root}/train.txt', - pipeline=train_pipeline, - img_prefix=data_root, - loader=loader, - dict_file=f'{data_root}/dict.txt', - test_mode=False) -test = dict( - type=dataset_type, - ann_file=f'{data_root}/test.txt', - pipeline=test_pipeline, - img_prefix=data_root, - loader=loader, - dict_file=f'{data_root}/dict.txt', - test_mode=True) - -data = dict( - samples_per_gpu=4, - workers_per_gpu=1, - val_dataloader=dict(samples_per_gpu=1), - test_dataloader=dict(samples_per_gpu=1), - train=train, - val=test, - test=test) 
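-# The macro-F1 evaluation below ignores the even-numbered "key" classes plus the -# Ignore/Others labels, so only the value-side wildreceipt classes are scored.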
- -evaluation = dict( - interval=1, - metric='macro_f1', - metric_options=dict( - macro_f1=dict( - ignores=[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 25]))) - -model = dict( - type='SDMGR', - backbone=dict(type='UNet', base_channels=16), - bbox_head=dict( - type='SDMGRHead', visual_dim=16, num_chars=92, num_classes=26), - visual_modality=False, - train_cfg=None, - test_cfg=None, - class_list=f'{data_root}/class_list.txt') - -optimizer = dict(type='Adam', weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1, - warmup_ratio=1, - step=[40, 50]) -total_epochs = 60 - -checkpoint_config = dict(interval=1) -log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] - -find_unused_parameters = True diff --git a/spaces/Fcjs/digiplay-Real3D_F16full_v3.1/app.py b/spaces/Fcjs/digiplay-Real3D_F16full_v3.1/app.py deleted file mode 100644 index c05aed1df56f86f24055730a3a7b852bd1776c45..0000000000000000000000000000000000000000 --- a/spaces/Fcjs/digiplay-Real3D_F16full_v3.1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/digiplay/RealCartoon3D_F16full_v3.1").launch() \ No newline at end of file diff --git a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/ChatFree.py b/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/ChatFree.py deleted file mode 100644 index 6bbaebaed35681026ff1eeb8eee3270e3b0741fd..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/ChatFree.py +++ /dev/null @@ -1,48 +0,0 @@ -import os, requests -from ...typing import sha256, Dict, get_type_hints -import json - -url = "https://v.chatfree.cc" -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k'] -supports_stream = False -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - headers = { - 'authority': 'chat.dfehub.com', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'content-type': 'application/json', - 'origin': 'https://v.chatfree.cc', - 'referer': 'https://v.chatfree.cc/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - 'x-requested-with': 'XMLHttpRequest', - } - - json_data = { - 'messages': messages, - 'stream': True, - 'model': model, - 'temperature': 0.5, - 'presence_penalty': 0, - 'frequency_penalty': 0, - 'top_p': 1, - } - - response = requests.post('https://v.chatfree.cc/api/openai/v1/chat/completions', - headers=headers, json=json_data) - - for chunk in response.iter_lines(): - if b'content' in chunk: - data = json.loads(chunk.decode().split('data: ')[1]) - yield (data['choices'][0]['delta']['content']) - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/GXSA/bingo/src/components/button-scroll-to-bottom.tsx 
b/spaces/GXSA/bingo/src/components/button-scroll-to-bottom.tsx deleted file mode 100644 index b68ab9c0e48320c356e51a52d11b9ca63909e6c5..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/components/button-scroll-to-bottom.tsx +++ /dev/null @@ -1,34 +0,0 @@ -'use client' - -import * as React from 'react' - -import { cn } from '@/lib/utils' - -import { useAtBottom } from '@/lib/hooks/use-at-bottom' -import { Button, type ButtonProps } from '@/components/ui/button' -import { IconArrowDown } from '@/components/ui/icons' - -export function ButtonScrollToBottom({ className, ...props }: ButtonProps) { - const isAtBottom = useAtBottom() - - return ( - - ) -} diff --git a/spaces/GXSA/bingo/src/lib/bots/bing/index.ts b/spaces/GXSA/bingo/src/lib/bots/bing/index.ts deleted file mode 100644 index c75c69f94af8c3db92d4c90d465c219a2af72a4d..0000000000000000000000000000000000000000 --- a/spaces/GXSA/bingo/src/lib/bots/bing/index.ts +++ /dev/null @@ -1,432 +0,0 @@ -import { fetch, WebSocket, debug } from '@/lib/isomorphic' -import WebSocketAsPromised from 'websocket-as-promised' -import { - SendMessageParams, - BingConversationStyle, - ConversationResponse, - ChatResponseMessage, - ConversationInfo, - InvocationEventType, - ChatError, - ErrorCode, - ChatUpdateCompleteResponse, - ImageInfo, - KBlobResponse -} from './types' - -import { convertMessageToMarkdown, websocketUtils, streamAsyncIterable } from './utils' -import { WatchDog, createChunkDecoder } from '@/lib/utils' - -type Params = SendMessageParams<{ bingConversationStyle: BingConversationStyle }> - -const OPTIONS_SETS = [ - 'nlu_direct_response_filter', - 'deepleo', - 'disable_emoji_spoken_text', - 'responsible_ai_policy_235', - 'enablemm', - 'iycapbing', - 'iyxapbing', - 'objopinion', - 'rweasgv2', - 'dagslnv1', - 'dv3sugg', - 'autosave', - 'iyoloxap', - 'iyoloneutral', - 'clgalileo', - 'gencontentv3', -] - -export class BingWebBot { - protected conversationContext?: ConversationInfo - protected cookie: string - protected ua: string - protected endpoint = '' - private lastText = '' - private asyncTasks: Array<Promise<void>> = [] - - constructor(opts: { - cookie: string - ua: string - bingConversationStyle?: BingConversationStyle - conversationContext?: ConversationInfo - }) { - const { cookie, ua, conversationContext } = opts - this.cookie = cookie?.includes(';') ? cookie : `_EDGE_V=1; _U=${cookie}` - this.ua = ua - this.conversationContext = conversationContext - } - - static buildChatRequest(conversation: ConversationInfo) { - const optionsSets = OPTIONS_SETS - if (conversation.conversationStyle === BingConversationStyle.Precise) { - optionsSets.push('h3precise') - } else if (conversation.conversationStyle === BingConversationStyle.Creative) { - optionsSets.push('h3imaginative') - } - return { - arguments: [ - { - source: 'cib', - optionsSets, - allowedMessageTypes: [ - 'ActionRequest', - 'Chat', - 'Context', - 'InternalSearchQuery', - 'InternalSearchResult', - 'Disengaged', - 'InternalLoaderMessage', - 'Progress', - 'RenderCardRequest', - 'SemanticSerp', - 'GenerateContentQuery', - 'SearchQuery', - ], - sliceIds: [ - 'winmuid1tf', - 'anssupfor_c', - 'imgchatgptv2', - 'tts2cf', - 'contansperf', - 'mlchatpc8500w', - 'mlchatpc2', - 'ctrlworkpay', - 'winshortmsgtf', - 'cibctrl', - 'sydtransctrl', - 'sydconfigoptc', - '0705trt4', - '517opinion', - '628ajcopus0', - '330uaugs0', - '529rwea', - '0626snptrcs0', - '424dagslnv1', - ], - isStartOfSession: conversation.invocationId === 0, - message: { - author: 'user', - inputMethod: 'Keyboard', - text: conversation.prompt, - imageUrl: conversation.imageUrl, - messageType: 'Chat', - }, - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - participant: { id: conversation.clientId }, - }, - ], - invocationId: conversation.invocationId.toString(), - target: 'chat', - type: InvocationEventType.StreamInvocation, - } - } - - async createConversation(): Promise<ConversationResponse> { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - - let resp: ConversationResponse | undefined - try { - const response = await fetch(this.endpoint + '/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' }) - if (response.status === 404) { - throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR) - } - resp = await response.json() as ConversationResponse - } catch (err) { - console.error('create conversation error', err) - } - - if (!resp?.result) { - throw new ChatError('Your VPS or proxy may have been banned. If in doubt, please visit https://github.com/weaigc/bingo for help', ErrorCode.BING_IP_FORBIDDEN) - } - - const { value, message } = resp.result || {} - if (value !== 'Success') { - const errorMsg = `${value}: ${message}` - if (value === 'UnauthorizedRequest') { - if (/fetch failed/i.test(message || '')) { - throw new ChatError(errorMsg, ErrorCode.BING_IP_FORBIDDEN) - } - throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED) - } - if (value === 'TryLater') { - throw new ChatError(errorMsg, ErrorCode.BING_TRY_LATER) - } - if (value === 'Forbidden') { - throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN) - } - throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR) - } - return resp - } - - private async createContext(conversationStyle: BingConversationStyle) { - if (!this.conversationContext) { - const conversation = await this.createConversation() - this.conversationContext = { - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - clientId: conversation.clientId, - invocationId: 0, - conversationStyle, - prompt: '', - } - } - return this.conversationContext - } - - async sendMessage(params: Params) { - try { - await this.createContext(params.options.bingConversationStyle) - Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl }) - return this.sydneyProxy(params) - } catch (error) { - params.onEvent({ - type: 'ERROR', - error: error instanceof ChatError ? error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR), - }) - } - } - - private async sydneyProxy(params: Params) { - const abortController = new AbortController() - const response = await fetch(this.endpoint + '/api/sydney', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - signal: abortController.signal, - body: JSON.stringify(this.conversationContext!) - }) - if (response.status !== 200) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Unknown error', - ErrorCode.UNKOWN_ERROR, - ), - }) - } - params.signal?.addEventListener('abort', () => { - abortController.abort() - }) - - const textDecoder = createChunkDecoder() - for await (const chunk of streamAsyncIterable(response.body!)) { - this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk))) - } - } - - async sendWs() { - const wsConfig: ConstructorParameters<typeof WebSocketAsPromised>[1] = { - packMessage: websocketUtils.packMessage, - unpackMessage: websocketUtils.unpackMessage, - createWebSocket: (url) => new WebSocket(url, { - headers: { - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'User-Agent': this.ua, - pragma: 'no-cache', - cookie: this.cookie, - } - }) - } - const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig) - - wsp.open().then(() => { - wsp.sendPacked({ protocol: 'json', version: 1 }) - wsp.sendPacked({ type: 6 }) - wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!)) - }) - - return wsp - } - - private async useWs(params: Params) { - const wsp = await this.sendWs() - const watchDog = new WatchDog() - wsp.onUnpackedMessage.addListener((events) => { - watchDog.watch(() => { - wsp.sendPacked({ type: 6 }) - }) - this.parseEvents(params, events) - }) - - wsp.onClose.addListener(() => { - watchDog.reset() - params.onEvent({ type: 'DONE' }) - wsp.removeAllListeners() - }) - - params.signal?.addEventListener('abort', () => { - wsp.removeAllListeners() - wsp.close() - }) - } - - private async createImage(prompt: string, id: string) { - try { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - const query = new URLSearchParams({ - prompt, - id - }) - const response = await fetch(this.endpoint + '/api/image?' + query.toString(), - { - method: 'POST', - headers, - mode: 'cors', - credentials: 'include' - }) - .then(res => res.text()) - if (response) { - this.lastText += '\n' + response - } - } catch (err) { - console.error('Create Image Error', err) - } - } - - private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) { - const imageInfo: ImageInfo = {} - let imageBase64: string | undefined = undefined - const knowledgeRequest = { - imageInfo, - knowledgeRequest: { - invokedSkills: [ - 'ImageById' - ], - subscriptionId: 'Bing.Chat.Multimodal', - invokedSkillsRequestData: { - enableFaceBlur: true - }, - convoData: { - convoid: this.conversationContext?.conversationId, - convotone: conversationStyle, - } - }, - } - - if (imageUrl.startsWith('data:image/')) { - imageBase64 = imageUrl.replace('data:image/', ''); - const partIndex = imageBase64.indexOf(',') - if (partIndex) { - imageBase64 = imageBase64.substring(partIndex + 1) - } - } else { - imageInfo.url = imageUrl - } - return { knowledgeRequest, imageBase64 } - } - - async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise<KBlobResponse | undefined> { - if (!imageUrl) { - return - } - await this.createContext(conversationStyle) - const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle) - - const response = await fetch(this.endpoint + '/api/kblob', - { - headers: { - 'Content-Type': 'application/json', - }, - method: 'POST', - mode: 'cors', - credentials: 'include', - body: JSON.stringify(payload), - }) - .then(res => res.json()) - .catch(e => { - console.log('Error', e) - }) - return response - } - - private async generateContent(message: ChatResponseMessage) { - if (message.contentType === 'IMAGE') { - this.asyncTasks.push(this.createImage(message.text, message.messageId)) - } - } - - private async parseEvents(params: Params, events: any) { - const conversation = this.conversationContext! - - events?.forEach(async (event: ChatUpdateCompleteResponse) => { - debug('bing event', event) - if (event.type === 3) { - await Promise.all(this.asyncTasks) - this.asyncTasks = [] - params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } }) - params.onEvent({ type: 'DONE' }) - conversation.invocationId = parseInt(event.invocationId, 10) + 1 - } else if (event.type === 1) { - const messages = event.arguments[0].messages - if (messages) { - const text = convertMessageToMarkdown(messages[0]) - this.lastText = text - params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } }) - } - } else if (event.type === 2) { - const messages = event.item.messages as ChatResponseMessage[] | undefined - if (!messages) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - event.item.result.error || 'Unknown error', - event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT - : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ?
ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA) - : ErrorCode.UNKOWN_ERROR - ), - }) - return - } - const limited = messages.some((message) => - message.contentOrigin === 'TurnLimiter' - || message.messageType === 'Disengaged' - ) - if (limited) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Sorry, you have reached chat limit in this conversation.', - ErrorCode.CONVERSATION_LIMIT, - ), - }) - return - } - - const lastMessage = event.item.messages.at(-1) as ChatResponseMessage - const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE') - if (specialMessage) { - this.generateContent(specialMessage) - } - - if (lastMessage) { - const text = convertMessageToMarkdown(lastMessage) - this.lastText = text - params.onEvent({ - type: 'UPDATE_ANSWER', - data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions }, - }) - } - } - }) - } - - resetConversation() { - this.conversationContext = undefined - } -} diff --git a/spaces/Gradio-Blocks/anime-colorization/scripts/pixel_guide_sample.py b/spaces/Gradio-Blocks/anime-colorization/scripts/pixel_guide_sample.py deleted file mode 100644 index 86639c16e232c50a77cbd8c1714996c98a114e2e..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/anime-colorization/scripts/pixel_guide_sample.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -Generate a large batch of samples from a super resolution model, given a batch -of samples from a regular model from image_sample.py. -""" - -import argparse -import os - -import blobfile as bf -import numpy as np -import torch as th -import torch.distributed as dist - -from torchvision import utils -from pixel_guide_diffusion import dist_util, logger -from pixel_guide_diffusion.image_datasets import load_data -from pixel_guide_diffusion.script_util import ( - pg_model_and_diffusion_defaults, - pg_create_model_and_diffusion, - args_to_dict, - add_dict_to_argparser, -) - - -def main(): - args = create_argparser().parse_args() - - dist_util.setup_dist() - logger.configure() - - logger.log("creating model...") - model, diffusion = pg_create_model_and_diffusion( - **args_to_dict(args, pg_model_and_diffusion_defaults().keys()) - ) - model.load_state_dict( - dist_util.load_state_dict(args.model_path, map_location="cpu") - ) - model.to(dist_util.dev()) - model.eval() - - logger.log("creating data loader...") - data = load_data( - data_dir=args.data_dir, - batch_size=args.batch_size, - image_size=args.image_size, - class_cond=args.class_cond, - guide_dir=args.guide_dir, - guide_size=args.guide_size, - deterministic=True, - ) - - logger.log("creating samples...") - os.makedirs('sample', exist_ok=True) - i = 0 - while i * args.batch_size < args.num_samples: - if dist.get_rank() == 0: - target, model_kwargs = next(data) - target = target.to(dist_util.dev()) - model_kwargs = {k: v.to(dist_util.dev()) for k, v in model_kwargs.items()} - - with th.no_grad(): - sample_fn = ( - diffusion.p_sample_loop if not args.use_ddim else diffusion.ddim_sample_loop - ) - sample = sample_fn( - model, - (args.batch_size, 3, args.image_size, args.image_size), - clip_denoised=args.clip_denoised, - model_kwargs=model_kwargs, - ) - - guide = model_kwargs["guide"] - h, w = guide.shape[2:] - guide = guide.clamp(-1,1).repeat(1,3,1,1) - sample = th.nn.functional.interpolate(sample.clamp(-1,1), size=(h, w)) - target = th.nn.functional.interpolate(target.clamp(-1,1), size=(h, w)) - - 
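# Stack the guide, sample and target rows into one grid per batch, so each - # saved PNG shows conditioning input, model output and ground truth side by side. -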
images = th.cat([guide, sample, target], 0) - utils.save_image( - images, - f"sample/{str(i).zfill(6)}.png", - nrow=args.batch_size, - normalize=True, - range=(-1, 1), - ) - - i += 1 - logger.log(f"created {i * args.batch_size} samples") - - logger.log("sampling complete") - - -def create_argparser(): - defaults = dict( - data_dir="", - guide_dir="", - clip_denoised=True, - num_samples=100, - batch_size=4, - use_ddim=False, - base_samples="", - model_path="", - ) - defaults.update(pg_model_and_diffusion_defaults()) - parser = argparse.ArgumentParser() - add_dict_to_argparser(parser, defaults) - return parser - - -if __name__ == "__main__": - main() diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py deleted file mode 100644 index e9c5defd1cda850f9702c05a86e0671880ef5e38..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py +++ /dev/null @@ -1,105 +0,0 @@ -_base_ = [ - '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py' -] - -# model settings -model = dict( - type='CornerNet', - backbone=dict( - type='HourglassNet', - downsample_times=5, - num_stacks=2, - stage_channels=[256, 256, 384, 384, 384, 512], - stage_blocks=[2, 2, 2, 2, 2, 4], - norm_cfg=dict(type='BN', requires_grad=True)), - neck=None, - bbox_head=dict( - type='CentripetalHead', - num_classes=80, - in_channels=256, - num_feat_levels=2, - corner_emb_channels=0, - loss_heatmap=dict( - type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1), - loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1), - loss_guiding_shift=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=0.05), - loss_centripetal_shift=dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1)), - # training and testing settings - train_cfg=None, - test_cfg=dict( - corner_topk=100, - local_maximum_kernel=3, - distance_threshold=0.5, - score_thr=0.05, - max_per_img=100, - nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian'))) -# data settings -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='RandomCenterCropPad', - crop_size=(511, 511), - ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3), - test_mode=False, - test_pad_mode=None, - **img_norm_cfg), - dict(type='Resize', img_scale=(511, 511), keep_ratio=False), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict( - type='MultiScaleFlipAug', - scale_factor=1.0, - flip=True, - transforms=[ - dict(type='Resize'), - dict( - type='RandomCenterCropPad', - crop_size=None, - ratios=None, - border=None, - test_mode=True, - test_pad_mode=['logical_or', 127], - **img_norm_cfg), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict( - type='Collect', - keys=['img'], - 
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape', - 'scale_factor', 'flip', 'img_norm_cfg', 'border')), - ]) -] -data = dict( - samples_per_gpu=6, - workers_per_gpu=3, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='Adam', lr=0.0005) -optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=1.0 / 3, - step=[190]) -runner = dict(type='EpochBasedRunner', max_epochs=210) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/wider_face/ssd300_wider_face.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/wider_face/ssd300_wider_face.py deleted file mode 100644 index 5a3eb38df3dc75af176cc6972af88e76124ba4dc..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/wider_face/ssd300_wider_face.py +++ /dev/null @@ -1,18 +0,0 @@ -_base_ = [ - '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py', - '../_base_/default_runtime.py' -] -model = dict(bbox_head=dict(num_classes=1)) -# optimizer -optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict() -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=1000, - warmup_ratio=0.001, - step=[16, 20]) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=24) -log_config = dict(interval=1) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/assigners/hungarian_assigner.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/assigners/hungarian_assigner.py deleted file mode 100644 index e10cc14afac4ddfcb9395c1a250ece1fbfe3263c..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/assigners/hungarian_assigner.py +++ /dev/null @@ -1,145 +0,0 @@ -import torch - -from ..builder import BBOX_ASSIGNERS -from ..match_costs import build_match_cost -from ..transforms import bbox_cxcywh_to_xyxy -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - -try: - from scipy.optimize import linear_sum_assignment -except ImportError: - linear_sum_assignment = None - - -@BBOX_ASSIGNERS.register_module() -class HungarianAssigner(BaseAssigner): - """Computes one-to-one matching between predictions and ground truth. - - This class computes an assignment between the targets and the predictions - based on the costs. The costs are weighted sum of three components: - classification cost, regression L1 cost and regression iou cost. The - targets don't include the no_object, so generally there are more - predictions than targets. After the one-to-one matching, the un-matched - are treated as backgrounds. Thus each query prediction will be assigned - with `0` or a positive integer indicating the ground truth index: - - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - cls_weight (int | float, optional): The scale factor for classification - cost. Default 1.0. - bbox_weight (int | float, optional): The scale factor for regression - L1 cost. Default 1.0. - iou_weight (int | float, optional): The scale factor for regression - iou cost. Default 1.0. - iou_calculator (dict | optional): The config for the iou calculation. - Default type `BboxOverlaps2D`. 
- iou_mode (str | optional): "iou" (intersection over union), "iof" - (intersection over foreground), or "giou" (generalized - intersection over union). Default "giou". - """ - - def __init__(self, - cls_cost=dict(type='ClassificationCost', weight=1.), - reg_cost=dict(type='BBoxL1Cost', weight=1.0), - iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)): - self.cls_cost = build_match_cost(cls_cost) - self.reg_cost = build_match_cost(reg_cost) - self.iou_cost = build_match_cost(iou_cost) - - def assign(self, - bbox_pred, - cls_pred, - gt_bboxes, - gt_labels, - img_meta, - gt_bboxes_ignore=None, - eps=1e-7): - """Computes one-to-one matching based on the weighted costs. - - This method assign each query prediction to a ground truth or - background. The `assigned_gt_inds` with -1 means don't care, - 0 means negative sample, and positive number is the index (1-based) - of assigned gt. - The assignment is done in the following steps, the order matters. - - 1. assign every prediction to -1 - 2. compute the weighted costs - 3. do Hungarian matching on CPU based on the costs - 4. assign all to 0 (background) first, then for each matched pair - between predictions and gts, treat this prediction as foreground - and assign the corresponding gt index (plus 1) to it. - - Args: - bbox_pred (Tensor): Predicted boxes with normalized coordinates - (cx, cy, w, h), which are all in range [0, 1]. Shape - [num_query, 4]. - cls_pred (Tensor): Predicted classification logits, shape - [num_query, num_class]. - gt_bboxes (Tensor): Ground truth boxes with unnormalized - coordinates (x1, y1, x2, y2). Shape [num_gt, 4]. - gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). - img_meta (dict): Meta information for current image. - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`. Default None. - eps (int | float, optional): A value added to the denominator for - numerical stability. Default 1e-7. - - Returns: - :obj:`AssignResult`: The assigned result. - """ - assert gt_bboxes_ignore is None, \ - 'Only case when gt_bboxes_ignore is None is supported.' - num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0) - - # 1. assign -1 by default - assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), - -1, - dtype=torch.long) - assigned_labels = bbox_pred.new_full((num_bboxes, ), - -1, - dtype=torch.long) - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - if num_gts == 0: - # No ground truth, assign all to background - assigned_gt_inds[:] = 0 - return AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) - img_h, img_w, _ = img_meta['img_shape'] - factor = gt_bboxes.new_tensor([img_w, img_h, img_w, - img_h]).unsqueeze(0) - - # 2. compute the weighted costs - # classification and bboxcost. - cls_cost = self.cls_cost(cls_pred, gt_labels) - # regression L1 cost - normalize_gt_bboxes = gt_bboxes / factor - reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) - # regression iou cost, defaultly giou is used in official DETR. - bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor - iou_cost = self.iou_cost(bboxes, gt_bboxes) - # weighted sum of above three costs - cost = cls_cost + reg_cost + iou_cost - - # 3. 
do Hungarian matching on CPU using linear_sum_assignment - cost = cost.detach().cpu() - if linear_sum_assignment is None: - raise ImportError('Please run "pip install scipy" ' - 'to install scipy first.') - matched_row_inds, matched_col_inds = linear_sum_assignment(cost) - matched_row_inds = torch.from_numpy(matched_row_inds).to( - bbox_pred.device) - matched_col_inds = torch.from_numpy(matched_col_inds).to( - bbox_pred.device) - - # 4. assign backgrounds and foregrounds - # assign all indices to backgrounds first - assigned_gt_inds[:] = 0 - # assign foregrounds based on matching results - assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 - assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] - return AssignResult( - num_gts, assigned_gt_inds, None, labels=assigned_labels) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/tests/modules/test_transformer.py b/spaces/GrandaddyShmax/AudioCraft_Plus/tests/modules/test_transformer.py deleted file mode 100644 index 2bb79bfd58d535469f9b3c56b8a5fe254db5d8ba..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/tests/modules/test_transformer.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.transformer import ( - StreamingMultiheadAttention, StreamingTransformer, set_efficient_attention_backend) - - -def test_transformer_causal_streaming(): - torch.manual_seed(1234) - - for context, custom in product([None, 10], [False, True]): - # Test that causality and receptive fields are properly handled. - # looking at the gradients - tr = StreamingTransformer( - 16, 4, 1 if context else 2, - causal=True, past_context=context, custom=custom, - dropout=0.) - steps = 20 - for k in [0, 10, 15, 19]: - x = torch.randn(4, steps, 16, requires_grad=True) - y = tr(x) - y[:, k].abs().sum().backward() - if k + 1 < steps: - assert torch.allclose(x.grad[:, k + 1:], torch.tensor(0.)), x.grad[:, k + 1:].norm() - assert not torch.allclose(x.grad[:, :k + 1], torch.tensor(0.)), x.grad[:, :k + 1].norm() - if context is not None and k > context: - limit = k - context - 1 - assert torch.allclose(x.grad[:, :limit], - torch.tensor(0.)), x.grad[:, :limit].norm() - - # Now check that streaming gives the same result at batch eval. - x = torch.randn(4, steps, 16) - y = tr(x) - ys = [] - with tr.streaming(): - for k in range(steps): - chunk = x[:, k:k + 1, :] - ys.append(tr(chunk)) - y_stream = torch.cat(ys, dim=1) - delta = torch.norm(y_stream - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_transformer_vs_pytorch(): - torch.manual_seed(1234) - # Check that in the non causal setting, we get the same result as - # PyTorch Transformer encoder. - for custom in [False, True]: - tr = StreamingTransformer( - 16, 4, 2, - causal=False, custom=custom, dropout=0., positional_scale=0.) - layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True) - tr_ref = torch.nn.TransformerEncoder(layer, 2) - tr.load_state_dict(tr_ref.state_dict()) - - x = torch.randn(4, 20, 16) - y = tr(x) - y2 = tr_ref(x) - delta = torch.norm(y2 - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_streaming_api(): - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.) 
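- # The round-trip below checks that get_streaming_state() / set_streaming_state() - # snapshot and restore the streaming state losslessly: re-running the same step - # from a restored snapshot must reproduce the same output.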
- tr.eval() - steps = 12 - x = torch.randn(1, steps, 16) - - with torch.no_grad(): - with tr.streaming(): - _ = tr(x[:, :1]) - state = {k: v.clone() for k, v in tr.get_streaming_state().items()} - y = tr(x[:, 1:2]) - tr.set_streaming_state(state) - y2 = tr(x[:, 1:2]) - assert torch.allclose(y, y2), (y - y2).norm() - assert tr.flush() is None - - -def test_memory_efficient(): - for backend in ['torch', 'xformers']: - torch.manual_seed(1234) - set_efficient_attention_backend(backend) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1) - tr_mem_efficient.load_state_dict(tr.state_dict()) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_mem_efficient(x) - assert torch.allclose(y, y2), ((y - y2).norm(), backend) - - -def test_attention_as_float32(): - torch.manual_seed(1234) - cases = [ - {'custom': True}, - {'custom': False}, - ] - for case in cases: - tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case) - tr_float32 = StreamingTransformer( - 16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case) - if not case['custom']: - # we are not using autocast here because it doesn't really - # work as expected on CPU, so we have to manually cast the weights of the MHA. - for layer in tr_float32.layers: - layer.self_attn.mha.to(torch.float32) - tr_float32.load_state_dict(tr.state_dict()) - steps = 12 - x = torch.randn(3, steps, 16, dtype=torch.bfloat16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_float32(x) - assert not torch.allclose(y, y2), (y - y2).norm() - - -@torch.no_grad() -def test_streaming_memory_efficient(): - for backend in ['torch', 'xformers']: - torch.manual_seed(1234) - set_efficient_attention_backend(backend) - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, causal=True) - tr.load_state_dict(tr_mem_efficient.state_dict()) - tr.eval() - tr_mem_efficient.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - ref = tr(x) - - with tr_mem_efficient.streaming(): - outs = [] - # frame_sizes = [2] + [1] * (steps - 2) - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr_mem_efficient(frame)) - - out = torch.cat(outs, dim=1) - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -def test_cross_attention(): - torch.manual_seed(1234) - for norm_first in [True, False]: - m = StreamingTransformer( - 16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True) - m_cross = StreamingTransformer( - 16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True) - m_cross.load_state_dict(m.state_dict(), strict=False) - x = torch.randn(2, 5, 16) - cross_x = torch.randn(2, 3, 16) - y_ref = m(x) - y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x) - # With norm_first, the two should be exactly the same, - # but with norm_first=False, we get 2 normalization in a row - # and the epsilon value leads to a tiny change. - atol = 0. if norm_first else 1e-6 - print((y_ref - y_cross_zero).norm() / y_ref.norm()) - assert torch.allclose(y_ref, y_cross_zero, atol=atol) - - # We now expect a difference even with a generous atol of 1e-2. 
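# --- Editorial aside (not in the deleted file): throughout these tests,
# tensors are compared with the scale-invariant relative error
#     delta = torch.norm(a - b) / torch.norm(b); assert delta < 1e-6
# A fixed atol appears only in the case noted just above, where two
# LayerNorms in a row introduce an epsilon-sized but expected discrepancy. ---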
- y_cross = m_cross(x, cross_attention_src=cross_x) - assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2) - - with pytest.raises(AssertionError): - _ = m_cross(x) - _ = m(x, cross_attention_src=cross_x) - - -def test_cross_attention_compat(): - torch.manual_seed(1234) - num_heads = 2 - dim = num_heads * 64 - with pytest.raises(AssertionError): - StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True) - - cross_attn = StreamingMultiheadAttention( - dim, num_heads, dropout=0, cross_attention=True, custom=True) - ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True) - - # We can load the regular attention state dict - # so we have compat when loading old checkpoints. - cross_attn.load_state_dict(ref_attn.state_dict()) - - queries = torch.randn(3, 7, dim) - keys = torch.randn(3, 9, dim) - values = torch.randn(3, 9, dim) - - y = cross_attn(queries, keys, values)[0] - y_ref = ref_attn(queries, keys, values)[0] - assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm() - - # Now let's check that streaming is working properly. - with cross_attn.streaming(): - ys = [] - for step in range(queries.shape[1]): - ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0]) - y_streaming = torch.cat(ys, dim=1) - assert torch.allclose(y_streaming, y, atol=1e-7) - - -def test_repeat_kv(): - torch.manual_seed(1234) - num_heads = 8 - kv_repeat = 4 - dim = num_heads * 64 - with pytest.raises(AssertionError): - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True) - x = torch.randn(4, 18, dim) - y = mha(x, x, x)[0] - assert x.shape == y.shape - - -def test_qk_layer_norm(): - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False) - steps = 12 - x = torch.randn(3, steps, 16) - y = tr(x) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True) - z = torch.randn(3, 21, 16) - y = tr(x, cross_attention_src=z) - assert y.shape == x.shape diff --git a/spaces/Gustavosta/MagicPrompt-Dalle/README.md b/spaces/Gustavosta/MagicPrompt-Dalle/README.md deleted file mode 100644 index 08f9b57a71909c1d41d9dc1602afe19abf1c08fe..0000000000000000000000000000000000000000 --- a/spaces/Gustavosta/MagicPrompt-Dalle/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MagicPrompt Dalle -emoji: 🏃 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HFUniversity2022/final-project-abubakar/README.md b/spaces/HFUniversity2022/final-project-abubakar/README.md deleted file mode 100644 index f0964c3133863f265785848a2f32045d314b891d..0000000000000000000000000000000000000000 --- a/spaces/HFUniversity2022/final-project-abubakar/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Image2story -emoji: 🐨 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.0.5 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/HaoFeng2019/DocGeoNet/inference.py b/spaces/HaoFeng2019/DocGeoNet/inference.py deleted file mode 
100644
index f5d059d46963105c26b30b254a4f0d403b4fdead..0000000000000000000000000000000000000000
--- a/spaces/HaoFeng2019/DocGeoNet/inference.py
+++ /dev/null
@@ -1,128 +0,0 @@
-from model import DocGeoNet
-from seg import U2NETP
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import skimage.io as io
-import numpy as np
-import cv2
-import os
-from PIL import Image
-import argparse
-import warnings
-warnings.filterwarnings('ignore')
-
-
-class Net(nn.Module):
-    def __init__(self, opt):
-        super(Net, self).__init__()
-        self.msk = U2NETP(3, 1)
-        self.DocTr = DocGeoNet()
-
-    def forward(self, x):
-        msk, _1, _2, _3, _4, _5, _6 = self.msk(x)
-        msk = (msk > 0.5).float()
-        x = msk * x
-
-        _, _, bm = self.DocTr(x)
-        # map the predicted backward flow into (-1, 1) for F.grid_sample
-        bm = (2 * (bm / 255.) - 1) * 0.99
-
-        return bm
-
-
-def reload_seg_model(model, path=""):
-    if not bool(path):
-        return model
-    else:
-        model_dict = model.state_dict()
-        pretrained_dict = torch.load(path, map_location='cpu')
-        print(len(pretrained_dict.keys()))
-        # drop a fixed 6-character prefix from each checkpoint key and keep
-        # only the keys that exist in the model
-        pretrained_dict = {k[6:]: v for k, v in pretrained_dict.items() if k[6:] in model_dict}
-        print(len(pretrained_dict.keys()))
-        model_dict.update(pretrained_dict)
-        model.load_state_dict(model_dict)
-
-        return model
-
-
-def reload_rec_model(model, path=""):
-    if not bool(path):
-        return model
-    else:
-        model_dict = model.state_dict()
-        pretrained_dict = torch.load(path, map_location='cpu')
-        print(len(pretrained_dict.keys()))
-        # same idea as above, but with a fixed 7-character key prefix
-        pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
-        print(len(pretrained_dict.keys()))
-        model_dict.update(pretrained_dict)
-        model.load_state_dict(model_dict)
-
-        return model
-
-
-def rec(seg_model_path, rec_model_path, distorrted_path, save_path, opt):
-    print(torch.__version__)
-
-    # list of distorted images
-    img_list = sorted(os.listdir(distorrted_path))
-
-    # create the save path for rectified images
-    if not os.path.exists(save_path):
-        os.makedirs(save_path)
-
-    net = Net(opt)#.cuda()
-    print(get_parameter_number(net))
-
-    # reload the rec and seg models
-    reload_rec_model(net.DocTr, rec_model_path)
-    reload_seg_model(net.msk, opt.seg_model_path)
-
-    net.eval()
-
-    for img_path in img_list:
-        name = img_path.split('.')[-2]  # image name
-        img_path = distorrted_path + img_path  # image path
-
-        im_ori = np.array(Image.open(img_path))[:, :, :3] / 255.
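# --- Editorial aside (not in the deleted file): the two reload_* helpers
# above strip a fixed-width checkpoint-key prefix by slicing k[6:] / k[7:].
# A sketch of the same idea with an explicit, named prefix (the helper name
# and prefix are assumptions for illustration, not from this repo):
def strip_prefix(state_dict, prefix):
    # Keep only the keys carrying the prefix, with the prefix removed.
    return {k[len(prefix):]: v for k, v in state_dict.items()
            if k.startswith(prefix)}
# Usage might look like strip_prefix(torch.load(p, map_location='cpu'), 'module.').
# --- end aside; rec() continues below ---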
# read image 0-255 to 0-1 - h, w, _ = im_ori.shape - im = cv2.resize(im_ori, (256, 256)) - im = im.transpose(2, 0, 1) - im = torch.from_numpy(im).float().unsqueeze(0) - - with torch.no_grad(): - bm = net(im) - bm = bm.cpu() - - # save rectified image - bm0 = cv2.resize(bm[0, 0].numpy(), (w, h)) # x flow - bm1 = cv2.resize(bm[0, 1].numpy(), (w, h)) # y flow - bm0 = cv2.blur(bm0, (3, 3)) - bm1 = cv2.blur(bm1, (3, 3)) - lbl = torch.from_numpy(np.stack([bm0, bm1], axis=2)).unsqueeze(0) # h * w * 2 - out = F.grid_sample(torch.from_numpy(im_ori).permute(2, 0, 1).unsqueeze(0).float(), lbl, align_corners=True) - cv2.imwrite(save_path + name + '_rec' + '.png', ((out[0] * 255).permute(1, 2, 0).numpy())[:,:,::-1].astype(np.uint8)) - - -def get_parameter_number(net): - total_num = sum(p.numel() for p in net.parameters()) - trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad) - return {'Total': total_num, 'Trainable': trainable_num} - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--seg_model_path', default='./model_pretrained/preprocess.pth') - parser.add_argument('--rec_model_path', default='./model_pretrained/DocGeoNet.pth') - parser.add_argument('--distorrted_path', default='./distorted/') - parser.add_argument('--save_path', default='./rec/') - opt = parser.parse_args() - - rec(seg_model_path=opt.seg_model_path, - rec_model_path=opt.rec_model_path, - distorrted_path=opt.distorrted_path, - save_path=opt.save_path, - opt=opt) - -if __name__ == "__main__": - main() diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/tts_infer/num_to_word_on_sent.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/tts_infer/num_to_word_on_sent.py deleted file mode 100644 index de571c2be63fa467491d01daf0e2f38dada67de9..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/tts_infer/num_to_word_on_sent.py +++ /dev/null @@ -1,1319 +0,0 @@ -import re -import string - -# ----------------------------- indic_num.py ----------------------------- -supported_lang = {"en", "hi", "gu", "mr", "bn", "te", "ta", "kn", "or", "pa"} -# supported_lang = {'eng', 'hin', 'guj', 'mar', 'ben', 'tel', 'tam', 'kan', 'ori', 'pan'} # Three alphabet lang code - -all_num = { - "en": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], - "hi": ["०", "१", "२", "३", "४", "५", "६", "७", "८", "९"], - "gu": ["૦", "૧", "૨", "૩", "૪", "૫", "૬", "૭", "૮", "૯"], - "mr": ["०", "१", "२", "३", "४", "५", "६", "७", "८", "९"], - "bn": ["০", "১", "২", "৩", "৪", "৫", "৬", "৭", "৮", "৯"], - "te": ["౦", "౧", "౨", "౩", "౪", "౫", "౬", "౭", "౮", "౯"], - "ta": ["0", "௧", "௨", "௩", "௪", "௫", "௬", "௭", "௮", "௯", "௰"], - "kn": ["೦", "೧", "೨", "೩", "೪", "೫", "೬", "೭", "೮", "೯"], - "or": ["୦", "୧", "୨", "୩", "୪", "୫", "୬", "୭", "୮", "୯"], - "pa": ["੦", "੧", "੨", "੩", "੪", "੫", "੬", "੭", "੮", "੯"], -} - -num_dict = dict() -num_dict["en"] = { - "0": "zero", - "1": "one", - "2": "two", - "3": "three", - "4": "four", - "5": "five", - "6": "six", - "7": "seven", - "8": "eight", - "9": "nine", - "10": "ten", - "11": "eleven", - "12": "twelve", - "13": "thirteen", - "14": "fourteen", - "15": "fifteen", - "16": "sixteen", - "17": "seventeen", - "18": "eighteen", - "19": "nineteen", - "20": "twenty", - "21": "twenty-one", - "22": "twenty-two", - "23": "twenty-three", - "24": "twenty-four", - "25": "twenty-five", - "26": "twenty-six", - "27": "twenty-seven", - "28": "twenty-eight", - "29": "twenty-nine", - "30": "thirty", - "31": "thirty-one", - "32": "thirty-two", - "33": "thirty-three", - 
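# --- Editorial aside (not in the deleted file): rec() above unwarps the page
# with F.grid_sample, whose grid holds, per output pixel, the source
# coordinate to sample, normalized to [-1, 1]. A minimal identity-warp sketch
# (hypothetical values; PyTorch 1.10+ meshgrid signature):
import torch
import torch.nn.functional as F

img = torch.rand(1, 3, 4, 4)
ys, xs = torch.meshgrid(torch.linspace(-1, 1, 4),
                        torch.linspace(-1, 1, 4), indexing='ij')
grid = torch.stack([xs, ys], dim=-1).unsqueeze(0)  # 1 x H x W x 2, (x, y) order
out = F.grid_sample(img, grid, align_corners=True)
assert torch.allclose(out, img)  # the identity grid reproduces the input
# --- end aside; the number-word tables continue below ---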
"34": "thirty-four", - "35": "thirty-five", - "36": "thirty-six", - "37": "thirty-seven", - "38": "thirty-eight", - "39": "thirty-nine", - "40": "forty", - "41": "forty-one", - "42": "forty-two", - "43": "forty-three", - "44": "forty-four", - "45": "forty-five", - "46": "forty-six", - "47": "forty-seven", - "48": "forty-eight", - "49": "forty-nine", - "50": "fifty", - "51": "fifty-one", - "52": "fifty-two", - "53": "fifty-three", - "54": "fifty-four", - "55": "fifty-five", - "56": "fifty-six", - "57": "fifty-seven", - "58": "fifty-eight", - "59": "fifty-nine", - "60": "sixty", - "61": "sixty-one", - "62": "sixty-two", - "63": "sixty-three", - "64": "sixty-four", - "65": "sixty-five", - "66": "sixty-six", - "67": "sixty-seven", - "68": "sixty-eight", - "69": "sixty-nine", - "70": "seventy", - "71": "seventy-one", - "72": "seventy-two", - "73": "seventy-three", - "74": "seventy-four", - "75": "seventy-five", - "76": "seventy-six", - "77": "seventy-seven", - "78": "seventy-eight", - "79": "seventy-nine", - "80": "eighty", - "81": "eighty-one", - "82": "eighty-two", - "83": "eighty-three", - "84": "eighty-four", - "85": "eighty-five", - "86": "eighty-six", - "87": "eighty-seven", - "88": "eighty-eight", - "89": "eighty-nine", - "90": "ninety", - "91": "ninety-one", - "92": "ninety-two", - "93": "ninety-three", - "94": "ninety-four", - "95": "ninety-five", - "96": "ninety-six", - "97": "ninety-seven", - "98": "ninety-eight", - "99": "ninety-nine", - "100": "hundred", - "1000": "thousand", - "100000": "lac", - "10000000": "crore", - "1000000000": "arab", -} # English-India -num_dict["hi"] = { - "0": "शून्य", - "1": "एक", - "2": "दो", - "3": "तीन", - "4": "चार", - "5": "पाँच", - "6": "छः", - "7": "सात", - "8": "आठ", - "9": "नौ", - "10": "दस", - "11": "ग्यारह", - "12": "बारह", - "13": "तेरह", - "14": "चौदह", - "15": "पंद्रह", - "16": "सोलह", - "17": "सत्रह", - "18": "अट्ठारह", - "19": "उन्नीस", - "20": "बीस", - "21": "इक्कीस", - "22": "बाईस", - "23": "तेईस", - "24": "चौबिस", - "25": "पच्चीस", - "26": "छब्बीस", - "27": "सत्ताईस", - "28": "अट्ठाईस", - "29": "उनतीस", - "30": "तीस", - "31": "इकतीस", - "32": "बत्तीस", - "33": "तैंतीस", - "34": "चौंतीस", - "35": "पैंतीस", - "36": "छत्तीस", - "37": "सैंतीस", - "38": "अड़तीस", - "39": "उनतालीस", - "40": "चालीस", - "41": "इकतालीस", - "42": "बयालीस", - "43": "तैंतालीस", - "44": "चौंतालीस", - "45": "पैंतालीस", - "46": "छियालीस", - "47": "सैंतालीस", - "48": "अड़तालीस", - "49": "उनचास", - "50": "पचास", - "51": "इक्यावन​", - "52": "बावन", - "53": "तिरेपन", - "54": "चौवन", - "55": "पचपन", - "56": "छप्पन", - "57": "सत्तावन", - "58": "अट्ठावन", - "59": "उनसठ", - "60": "साठ", - "61": "इकसठ", - "62": "बासठ", - "63": "तिरेसठ", - "64": "चौंसठ", - "65": "पैंसठ", - "66": "छयासठ", - "67": "सरसठ​", - "68": "अड़सठ", - "69": "उनहत्तर", - "70": "सत्तर", - "71": "इकहत्तर", - "72": "बहत्तर", - "73": "तिहत्तर", - "74": "चौहत्तर", - "75": "पचहत्तर", - "76": "छिहत्तर", - "77": "सतहत्तर", - "78": "अठहत्तर", - "79": "उन्यासी", - "80": "अस्सी", - "81": "इक्यासी", - "82": "बयासी", - "83": "तिरासी", - "84": "चौरासी", - "85": "पचासी", - "86": "छियासी", - "87": "सत्तासी", - "88": "अठासी", - "89": "नवासी", - "90": "नब्बे", - "91": "इक्यानवे", - "92": "बानवे", - "93": "तिरानवे", - "94": "चौरानवे", - "95": "पचानवे", - "96": "छियानवे", - "97": "सत्तानवे", - "98": "अट्ठानवे", - "99": "निन्यानवे", - "100": "सौ", - "1000": "हज़ार", - "100000": "लाख", - "10000000": "करोड़", - "1000000000": "अरब", -} # Hindi -num_dict["gu"] = { - "0": "શૂન્ય", - "1": "એક", - "2": "બે", - "3": "ત્રણ", - "4": 
"ચાર", - "5": "પાંચ", - "6": "છ", - "7": "સાત", - "8": "આઠ", - "9": "નવ", - "10": "દસ", - "11": "અગિયાર", - "12": "બાર", - "13": "તેર", - "14": "ચૌદ", - "15": "પંદર", - "16": "સોળ", - "17": "સત્તર", - "18": "અઢાર", - "19": "ઓગણિસ", - "20": "વીસ", - "21": "એકવીસ", - "22": "બાવીસ", - "23": "તેવીસ", - "24": "ચોવીસ", - "25": "પચ્ચીસ", - "26": "છવીસ", - "27": "સત્તાવીસ", - "28": "અઠ્ઠાવીસ", - "29": "ઓગણત્રીસ", - "30": "ત્રીસ", - "31": "એકત્રીસ", - "32": "બત્રીસ", - "33": "તેત્રીસ", - "34": "ચોત્રીસ", - "35": "પાંત્રીસ", - "36": "છત્રીસ", - "37": "સડત્રીસ", - "38": "અડત્રીસ", - "39": "ઓગણચાલીસ", - "40": "ચાલીસ", - "41": "એકતાલીસ", - "42": "બેતાલીસ", - "43": "ત્રેતાલીસ", - "44": "ચુંમાલીસ", - "45": "પિસ્તાલીસ", - "46": "છેતાલીસ", - "47": "સુડતાલીસ", - "48": "અડતાલીસ", - "49": "ઓગણપચાસ", - "50": "પચાસ", - "51": "એકાવન", - "52": "બાવન", - "53": "ત્રેપન", - "54": "ચોપન", - "55": "પંચાવન", - "56": "છપ્પન", - "57": "સત્તાવન", - "58": "અઠ્ઠાવન", - "59": "ઓગણસાઠ", - "60": "સાઈઠ", - "61": "એકસઠ", - "62": "બાસઠ", - "63": "ત્રેસઠ", - "64": "ચોસઠ", - "65": "પાંસઠ", - "66": "છાસઠ", - "67": "સડસઠ", - "68": "અડસઠ", - "69": "અગણોસિત્તેર", - "70": "સિત્તેર", - "71": "એકોતેર", - "72": "બોતેર", - "73": "તોતેર", - "74": "ચુમોતેર", - "75": "પંચોતેર", - "76": "છોતેર", - "77": "સિત્યોતેર", - "78": "ઇઠ્યોતેર", - "79": "ઓગણાએંસી", - "80": "એંસી", - "81": "એક્યાસી", - "82": "બ્યાસી", - "83": "ત્યાસી", - "84": "ચોર્યાસી", - "85": "પંચાસી", - "86": "છ્યાસી", - "87": "સિત્યાસી", - "88": "ઈઠ્યાસી", - "89": "નેવ્યાસી", - "90": "નેવું", - "91": "એકાણું", - "92": "બાણું", - "93": "ત્રાણું", - "94": "ચોરાણું", - "95": "પંચાણું", - "96": "છન્નું", - "97": "સત્તાણું", - "98": "અઠ્ઠાણું", - "99": "નવ્વાણું", - "100": "સો", - "1000": "હજાર", - "100000": "લાખ", - "1000000": "દસ લાખ", - "10000000": "કરોડ઼", -} # Gujarati -num_dict["mr"] = { - "0": "शून्य", - "1": "एक", - "2": "दोन", - "3": "तीन", - "4": "चार", - "5": "पाच", - "6": "सहा", - "7": "सात", - "8": "आठ", - "9": "नऊ", - "10": "दहा", - "11": "अकरा", - "12": "बारा", - "13": "तेरा", - "14": "चौदा", - "15": "पंधरा", - "16": "सोळा", - "17": "सतरा", - "18": "अठरा", - "19": "एकोणीस", - "20": "वीस", - "21": "एकवीस", - "22": "बावीस", - "23": "तेवीस", - "24": "चोवीस", - "25": "पंचवीस", - "26": "सव्वीस", - "27": "सत्तावीस", - "28": "अठ्ठावीस", - "29": "एकोणतीस", - "30": "तीस", - "31": "एकतीस", - "32": "बत्तीस", - "33": "तेहेतीस", - "34": "चौतीस", - "35": "पस्तीस", - "36": "छत्तीस", - "37": "सदतीस", - "38": "अडतीस", - "39": "एकोणचाळीस", - "40": "चाळीस", - "41": "एक्केचाळीस", - "42": "बेचाळीस", - "43": "त्रेचाळीस", - "44": "चव्वेचाळीस", - "45": "पंचेचाळीस", - "46": "सेहेचाळीस", - "47": "सत्तेचाळीस", - "48": "अठ्ठेचाळीस", - "49": "एकोणपन्नास", - "50": "पन्नास", - "51": "एक्कावन्न", - "52": "बावन्न", - "53": "त्रेपन्न", - "54": "चोपन्न", - "55": "पंचावन्न", - "56": "छप्पन्न", - "57": "सत्तावन्न", - "58": "अठ्ठावन्न", - "59": "एकोणसाठ", - "60": "साठ", - "61": "एकसष्ठ", - "62": "बासष्ठ", - "63": "त्रेसष्ठ", - "64": "चौसष्ठ", - "65": "पासष्ठ", - "66": "सहासष्ठ", - "67": "सदुसष्ठ", - "68": "अडुसष्ठ", - "69": "एकोणसत्तर", - "70": "सत्तर", - "71": "एक्काहत्तर", - "72": "बाहत्तर", - "73": "त्र्याहत्तर", - "74": "चौर्‍याहत्तर", - "75": "पंच्याहत्तर", - "76": "शहात्तर", - "77": "सत्याहत्तर", - "78": "अठ्ठ्याहत्तर", - "79": "एकोण ऐंशी", - "80": "ऐंशी", - "81": "एक्क्याऐंशी", - "82": "ब्याऐंशी", - "83": "त्र्याऐंशी", - "84": "चौऱ्याऐंशी", - "85": "पंच्याऐंशी", - "86": "शहाऐंशी", - "87": "सत्त्याऐंशी", - "88": "अठ्ठ्याऐंशी", - "89": "एकोणनव्वद", - "90": "नव्वद", - "91": "एक्क्याण्णव", - "92": 
"ब्याण्णव", - "93": "त्र्याण्णव", - "94": "चौऱ्याण्णव", - "95": "पंच्याण्णव", - "96": "शहाण्णव", - "97": "सत्त्याण्णव", - "98": "अठ्ठ्याण्णव", - "99": "नव्व्याण्णव", - "100": "शे", - "1000": "हजार", - "100000": "लाख", - "10000000": "कोटी", - "1000000000": "अब्ज", -} # Marathi -num_dict["bn"] = { - "0": "শূন্য", - "1": "এক", - "2": "দুই", - "3": "তিন", - "4": "চার", - "5": "পাঁচ", - "6": "ছয়", - "7": "সাত", - "8": "আট", - "9": "নয়", - "10": "দশ", - "11": "এগার", - "12": "বার", - "13": "তের", - "14": "চৌদ্দ", - "15": "পনের", - "16": "ষোল", - "17": "সতের", - "18": "আঠার", - "19": "ঊনিশ", - "20": "বিশ", - "21": "একুশ", - "22": "বাইশ", - "23": "তেইশ", - "24": "চব্বিশ", - "25": "পঁচিশ", - "26": "ছাব্বিশ", - "27": "সাতাশ", - "28": "আঠাশ", - "29": "ঊনত্রিশ", - "30": "ত্রিশ", - "31": "একত্রিশ", - "32": "বত্রিশ", - "33": "তেত্রিশ", - "34": "চৌত্রিশ", - "35": "পঁয়ত্রিশ", - "36": "ছত্রিশ", - "37": "সাঁইত্রিশ", - "38": "আটত্রিশ", - "39": "ঊনচল্লিশ", - "40": "চল্লিশ", - "41": "একচল্লিশ", - "42": "বিয়াল্লিশ", - "43": "তেতাল্লিশ", - "44": "চুয়াল্লিশ", - "45": "পঁয়তাল্লিশ", - "46": "ছেচল্লিশ", - "47": "সাতচল্লিশ", - "48": "আটচল্লিশ", - "49": "ঊনপঞ্চাশ", - "50": "পঞ্চাশ", - "51": "একান্ন", - "52": "বায়ান্ন", - "53": "তিপ্পান্ন", - "54": "চুয়ান্ন", - "55": "পঞ্চান্ন", - "56": "ছাপ্পান্ন", - "57": "সাতান্ন", - "58": "আটান্ন", - "59": "ঊনষাট", - "60": "ষাট", - "61": "একষট্টি", - "62": "বাষট্টি", - "63": "তেষট্টি", - "64": "চৌষট্টি", - "65": "পঁয়ষট্টি", - "66": "ছেষট্টি", - "67": "সাতষট্টি", - "68": "আটষট্টি", - "69": "ঊনসত্তর", - "70": "সত্তর", - "71": "একাত্তর", - "72": "বাহাত্তর", - "73": "তিয়াত্তর", - "74": "চুয়াত্তর", - "75": "পঁচাত্তর", - "76": "ছিয়াত্তর", - "77": "সাতাত্তর", - "78": "আটাত্তর", - "79": "ঊনআশি", - "80": "আশি", - "81": "একাশি", - "82": "বিরাশি", - "83": "তিরাশি", - "84": "চুরাশি", - "85": "পঁচাশি", - "86": "ছিয়াশি", - "87": "সাতাশি", - "88": "আটাশি", - "89": "ঊননব্বই", - "90": "নব্বই", - "91": "একানব্বই", - "92": "বিরানব্বই", - "93": "তিরানব্বই", - "94": "চুরানব্বই", - "95": "পঁচানব্বই", - "96": "ছিয়ানব্বই", - "97": "সাতানব্বই", - "98": "আটানব্বই", - "99": "নিরানব্বই", - "100": "শো", - "1000": "হাজার", - "100000": "লাখ", - "10000000": "কোটি", - "1000000000": "একশ’ কোটি", -} # Bengali -num_dict["te"] = { - "0": "సున్నా", - "1": "ఒకటి", - "2": "రెండు", - "3": "మూడు", - "4": "నాలుగు", - "5": "ఐదు", - "6": "ఆరు", - "7": "ఏడు", - "8": "ఎనిమిది", - "9": "తొమ్మిది", - "10": "పది", - "11": "పదకొండు", - "12": "పన్నెండు", - "13": "పదమూడు", - "14": "పద్నాలుగు", - "15": "పదిహేను", - "16": "పదహారు", - "17": "పదిహేడు", - "18": "పద్దెనిమిది", - "19": "పందొమ్మిది", - "20": "ఇరవై", - "21": "ఇరవై ఒకటి", - "22": "ఇరవై రెండు", - "23": "ఇరవై మూడు", - "24": "ఇరవై నాలుగు", - "25": "ఇరవై ఐదు", - "26": "ఇరవై ఆరు", - "27": "ఇరవై ఏడు", - "28": "ఇరవై ఎనిమిది", - "29": "ఇరవై తొమ్మిది", - "30": "ముప్పై", - "31": "ముప్పై ఒకటి", - "32": "ముప్పై రెండు", - "33": "ముప్పై మూడు", - "34": "ముప్పై నాలుగు", - "35": "ముప్పై ఐదు", - "36": "ముప్పై ఆరు", - "37": "ముప్పై ఏడు", - "38": "ముప్పై ఎనిమిది", - "39": "ముప్పై తొమ్మిది", - "40": "నలభై", - "41": "నలభై ఒకటి", - "42": "నలభై రెండు", - "43": "నలభై మూడు", - "44": "నలభై నాలుగు", - "45": "నలభై ఐదు", - "46": "నలభై ఆరు", - "47": "నలభై ఏడు", - "48": "నలభై ఎనిమిది", - "49": "నలభై తొమ్మిది", - "50": "యాభై", - "51": "యాభై ఒకటి", - "52": "యాభై రెండు", - "53": "యాభై మూడు", - "54": "యాభై నాలుగు", - "55": "యాభై ఐదు", - "56": "యాభై ఆరు", - "57": "యాభై ఏడు", - "58": "యాభై ఎనిమిది", - "59": "యాభై తొమ్మిది", - "60": "అరవై", - "61": "అరవై ఒకటి", - "62": "అరవై రెండు", - "63": "అరవై 
మూడు", - "64": "అరవై నాలుగు", - "65": "అరవై ఐదు", - "66": "అరవై ఆరు", - "67": "అరవై ఏడు", - "68": "అరవై ఎనిమిది", - "69": "అరవై తొమ్మిది", - "70": "డెబ్బై", - "71": "డెబ్బై ఒకటి", - "72": "డెబ్బై రెండు", - "73": "డెబ్బై మూడు", - "74": "డెబ్బై నాలుగు", - "75": "డెబ్బై ఐదు", - "76": "డెబ్బై ఆరు", - "77": "డెబ్బై ఏడు", - "78": "డెబ్బై ఎనిమిది", - "79": "డెబ్బై తొమ్మిది", - "80": "ఎనభై", - "81": "ఎనభై ఒకటి", - "82": "ఎనభై రెండు", - "83": "ఎనభై మూడు", - "84": "ఎనభై నాలుగు", - "85": "ఎనభై ఐదు", - "86": "ఎనభై ఆరు", - "87": "ఎనభై ఏడు", - "88": "ఎనభై ఎనిమిది", - "89": "ఎనభై తొమ్మిది", - "90": "తొంభై", - "91": "తొంభై ఒకటి", - "92": "తొంభై రెండు", - "93": "తొంభై మూడు", - "94": "తొంభై నాలుగు", - "95": "తొంభై ఐదు", - "96": "తొంభై ఆరు", - "97": "తొంభై ఏడు", - "98": "తొంభై ఎనిమిది", - "99": "తొంభై తొమ్మిది", - "100": "వందల", - "1000": "వేల", - "100000": "లక్షల", - "10000000": "కోట్ల", - "1000000000": "బిలియన్", -} # Telugu -num_dict["ta"] = { - "0": "பூஜ்ஜியம்", - "1": "ஒன்று", - "2": "இரண்டு", - "3": "மூன்று", - "4": "நான்கு", - "5": "ஐந்து", - "6": "ஆறு", - "7": "ஏழு", - "8": "எட்டு", - "9": "ஒன்பது", - "10": "பத்து", - "11": "பதினொன்று", - "12": "பன்னிரண்டு", - "13": "பதிமூன்று", - "14": "பதினான்கு", - "15": "பதினைந்து", - "16": "பதினாறு", - "17": "பதினேழு", - "18": "பதினெட்டு", - "19": "பத்தொன்பது", - "20": "இருபது", - "21": "இருபது ஒன்று", - "22": "இருபத்து இரண்டு", - "23": "இருபத்து மூன்று", - "24": "இருபத்து நான்கு", - "25": "இருபத்து ஐந்து", - "26": "இருபத்து ஆறு", - "27": "இருபத்து ஏழு", - "28": "இருபத்து எட்டு", - "29": "இருபத்து ஒன்பது", - "30": "முப்பது", - "31": "முப்பத்து ஒன்று", - "32": "முப்பத்து இரண்டு", - "33": "முப்பத்து மூன்று", - "34": "முப்பத்து நான்கு", - "35": "முப்பத்து ஐந்து", - "36": "முப்பத்து ஆறு", - "37": "முப்பத்து ஏழு", - "38": "முப்பத்து எட்டு", - "39": "முப்பத்து ஒன்பது", - "40": "நாற்பது", - "41": "நாற்பத்து ஒன்று", - "42": "நாற்பத்து இரண்டு", - "43": "நாற்பத்து மூன்று", - "44": "நாற்பத்து நான்கு", - "45": "நாற்பத்து ஐந்து", - "46": "நாற்பத்து ஆறு", - "47": " நாற்பத்து ஏழு", - "48": "நாற்பத்து எட்டு", - "49": "நாற்பத்து ஒன்பது", - "50": "ஐம்பது", - "51": "ஐம்பத்து ஒன்று", - "52": "ஐம்பத்து இரண்டு", - "53": "ஐம்பத்து மூன்று", - "54": "ஐம்பத்து நான்கு", - "55": "ஐம்பத்து ஐந்து", - "56": "ஐம்பத்து ஆறு", - "57": "ஐம்பத்து ஏழு", - "58": "ஐம்பத்து எட்டு", - "59": "ஐம்பத்து ஒன்பது", - "60": "அறுபது", - "61": "அறுபத்து ஒன்று", - "62": "அறுபத்து இரண்டு", - "63": "அறுபத்து மூன்று", - "64": "அறுபத்து நான்கு", - "65": "அறுபத்து ஐந்து", - "66": "அறுபத்து ஆறு", - "67": "அறுபத்து ஏழு", - "68": "அறுபத்து எட்டு", - "69": "அறுபத்து ஒன்பது", - "70": "எழுபது", - "71": "எழுபத்தி ஒன்று", - "72": "எழுபத்தி இரண்டு", - "73": "எழுபத்தி முச்சக்கர", - "74": "எழுபத்தி நான்கு", - "75": "எழுபத்தி ஐந்து", - "76": "எழுபத்தி ஆறு", - "77": "எழுபத்தி ஏழு", - "78": "எழுபத்தி எட்டு", - "79": "எழுபத்தி ஒன்பது", - "80": "எண்பது", - "81": "எண்பத்தியொன்று", - "82": "எண்பத்திரண்டு", - "83": "எண்பத்திமூன்று", - "84": "என்பதினான்கு", - "85": "என்பதினைந்து", - "86": "எண்பத்திஆறு", - "87": "எண்பத்திஏழு", - "88": "எண்பத்தியெட்டு", - "89": "எண்பத்தியொன்பது", - "90": "தொன்னூறு", - "91": "தொண்ணூற்றியொன்று", - "92": "தொண்ணூற்றிரண்டு", - "93": "தொண்ணூற்றிமூன்று", - "94": "தொண்ணூற்றிநான்கு", - "95": "தொண்ணூற்றிஐந்து", - "96": "தொண்ணூற்றியாறு", - "97": "தொண்ணூற்றியேழு", - "98": "தொண்ணூற்றியெட்டு", - "99": "தொண்ணூற்றிஒன்பது", - "100": "நூறு", - "1000": "ஆயிரம்", - "100000": "இலட்சம்", - "10000000": "கோடி", - "1000000000": "பில்லியன்", -} # Tamil -num_dict["kn"] = { - "0": "ಸೊನ್ನೆ", - "1": "ಒಂದು", - "2": "ಎರಡು", - "3": 
"ಮೂರು", - "4": "ನಾಲ್ಕು", - "5": "ಅಯ್ದು", - "6": "ಆರು", - "7": "ಏಳು", - "8": "ಎಂಟು", - "9": "ಒಂಬತ್ತು", - "10": "ಹತ್ತು", - "11": "ಹನ್ನೊಂದು", - "12": "ಹನ್ನೆರಡು", - "13": "ಹದಿಮೂರು", - "14": "ಹದಿನಾಲ್ಕು", - "15": "ಹದಿನೈದು", - "16": "ಹದಿನಾರು", - "17": "ಹದಿನೇಳು", - "18": "ಹದಿನೆಂಟು", - "19": "ಹತ್ತೊಂಬತ್ತು", - "20": "ಇಪ್ಪತ್ತು", - "21": "ಇಪ್ಪತ್ತ್’ಒಂದು", - "22": "ಇಪ್ಪತ್ತ್’ಎರಡು", - "23": "ಇಪ್ಪತ್ತ್’ಮೂರು", - "24": "ಇಪ್ಪತ್ತ್’ನಾಲ್ಕು", - "25": "ಇಪ್ಪತ್ತ್’ಐದು", - "26": "ಇಪ್ಪತ್ತ್’ಆರು", - "27": "ಇಪ್ಪತ್ತ್’ಏಳು", - "28": "ಇಪ್ಪತ್ತ್’ಎಂಟು", - "29": "ಇಪ್ಪತ್ತ್’ಒಂಬತ್ತು", - "30": "ಮೂವತ್ತು", - "31": "ಮುವತ್ತ್’ಒಂದು", - "32": "ಮುವತ್ತ್’ಎರಡು", - "33": "ಮುವತ್ತ್’ಮೂರು", - "34": "ಮೂವತ್ತ್’ನಾಲ್ಕು", - "35": "ಮೂವತ್ತ್’ಐದು", - "36": "ಮೂವತ್ತ್’ಆರು", - "37": "ಮೂವತ್ತ್’ಏಳು", - "38": "ಮೂವತ್ತ್’ಎಂಟು", - "39": "ಮೂವತ್ತ್’ಒಂಬತ್ತು", - "40": "ನಲವತ್ತು", - "41": "ನಲವತ್ತೊಂದು", - "42": "ನಲವತ್ತ್ ಎರಡು", - "43": "ನಲವತ್ತ್ ಮೂರು", - "44": "ನಲವತ್ತ್ ನಾಲ್ಕು", - "45": "ನಲವತ್ತೈದು", - "46": "ನಲವತ್ತಾರು", - "47": "ನಲವತ್ತೇಳು", - "48": "ನಲವತ್ತೆಂಟು", - "49": "ನಲವತ್ತೊಂಬತ್ತು", - "50": "ಐವತ್ತು", - "51": "ಐವತ್ತೊಂದು", - "52": "ಐವತ್ತೆರಡು", - "53": "ಐವತ್ತಮೂರು", - "54": "ಐವತ್ತ್ನಾಲ್ಕು", - "55": "ಐವತ್ತೈದು", - "56": "ಐವತ್ತಾರು", - "57": "ಐವತ್ತೇಳು", - "58": "ಐವತ್ತೆಂಟು", - "59": "ಐವತ್ತೊಂಬತ್ತು", - "60": "ಅರವತ್ತು", - "61": "ಅರವತ್ತೊಂದು", - "62": "ಅರವತ್ತೆರಡು", - "63": "ಅರವತ್ತ್ ಮೂರು", - "64": "ಅರವತ್ತ್ ನಾಲ್ಕು", - "65": "ಅರವತ್ತೈದು", - "66": "ಅರವತ್ತಾರು", - "67": "ಅರವತ್ತೇಳು", - "68": "ಅರವತ್ತೆಂಟು", - "69": "ಅರವತ್ತೊಂಬತ್ತು", - "70": "ಎಪ್ಪತ್ತು", - "71": "ಎಪ್ಪತ್ತೊಂದು", - "72": "ಎಪ್ಪತ್ತೆರಡು", - "73": "ಎಪ್ಪತ್ತ್ ಮೂರು", - "74": "ಎಪ್ಪತ್ತ್ ನಾಲ್ಕು", - "75": "ಎಪ್ಪತ್ತೈದು", - "76": "ಎಪ್ಪತ್ತಾರು", - "77": "ಎಪ್ಪತ್ತೇಳು", - "78": "ಎಪ್ಪತ್ತೆಂಟು", - "79": "ಎಪ್ಪತ್ತೊಂಬತ್ತು", - "80": "ಎಂಬತ್ತು", - "81": "ಎಂಬತ್ತೊಂದು", - "82": "ಎಂಬತ್ತೆರಡು", - "83": "ಎಂಬತ್ತ್ ಮೂರು", - "84": "ಎಂಬತ್ತ್ ನಾಲ್ಕು", - "85": "ಎಂಬತ್ತೈದು", - "86": "ಎಂಬತ್ತಾರು", - "87": "ಎಂಬತ್ತೇಳು", - "88": "ಎಂಬತ್ತೆಂಟು", - "89": "ಎಂಬತ್ತೊಂಬತ್ತು", - "90": "ತೊಂಬತ್ತು", - "91": "ತೊಂಬತ್ತೊಂದು", - "92": "ತೊಂಬತ್ತೆರಡು", - "93": "ತೊಂಬತ್ತ ಮೂರು", - "94": "ತೊಂಬತ್ತ ನಾಲ್ಕು", - "95": "ತೊಂಬತ್ತೈದು", - "96": "ತೊಂಬತ್ತಾರು", - "97": "ತೊಂಬತ್ತೇಳು", - "98": "ತೊಂಬತ್ತೆಂಟು", - "99": "ತೊಂಬತ್ತೊಂಬತ್ತು", - "100": "ನೂರ", - "1000": "ಸಾವಿರದ", - "100000": "ಲಕ್ಷದ", - "10000000": "ಕೋಟಿ", - "1000000000": "ಶತಕೋಟಿ", -} # Kannada -num_dict["or"] = { - "0": "ଶୁନ୍ୟ", - "1": "ଏକ", - "2": "ଦୁଇ", - "3": "ତିନି", - "4": "ଚାରି", - "5": "ପାଞ୍ଚ", - "6": "ଛଅ", - "7": "ସାତ", - "8": "ଆଠ", - "9": "ନଅ", - "10": "ନଅ", - "11": "ଏଗାର", - "12": "ବାର", - "13": "ତେର", - "14": "ଚଉଦ", - "15": "ପନ୍ଦର", - "16": "ଷୋହଳ", - "17": "ସତର", - "18": "ଅଠର", - "19": "ଊଣାଇଶ", - "20": "କୋଡିଏ", - "21": "ଏକୋଇଶି", - "22": "ବାଇଶି", - "23": "ତେଇଶି", - "24": "ଚବିଶି", - "25": "ପଚିଶି", - "26": "ଛବିଶି", - "27": "ସତାଇଶି", - "28": "ଅଠାଇଶି", - "29": "ଅଣତିରିଶି", - "30": "ତିରିଶି", - "31": "ଏକତିରିଶି", - "32": "ବତିଶି", - "33": "ତେତିଶି", - "34": "ଚଉତିରିଶି", - "35": "ପଞ୍ଚତିରିଶି", - "36": "ଛତିଶି", - "37": "ସଂଇତିରିଶି", - "38": "ଅଠତିରିଶି", - "39": "ଅଣଚାଳିଶି", - "40": "ଚାଳିଶି", - "41": "ଏକଚାଳିଶି", - "42": "ବୟାଳିଶି", - "43": "ତେୟାଳିଶି", - "44": "ଚଉରାଳିଶି", - "45": "ପଞ୍ଚଚାଳିଶି", - "46": "ଛୟାଳିଶି", - "47": "ସତଚାଳିଶି", - "48": "ଅଠଚାଳିଶି", - "49": "ଅଣଚାଶ", - "50": "ପଚାଶ", - "51": "ଏକାବନ", - "52": "ବାଉନ", - "53": "ତେପନ", - "54": "ଚଉବନ", - "55": "ପଞ୍ଚାବନ", - "56": "ଛପନ", - "57": "ସତାବନ", - "58": "ଅଠାବନ", - "59": "ଅଣଷଠି", - "60": "ଷାଠିଏ", - "61": "ଏକଷଠି", - "62": "ବାଷଠି", - "63": "ତେଷଠି", - "64": "ଚଉଷଠି", - "65": "ପଞ୍ଚଷଠି", - "66": "ଛଅଷଠି", - "67": "ସତଷଠି", - "68": "ଅଠଷଠି", - "69": "ଅଣସ୍ତରୀ", - "70": "ସତୂରୀ", - "71": "ଏକସ୍ତରୀ", - "72": "ବାସ୍ତରୀ", - "73": "ତେସ୍ତରୀ", - "74": 
"ଚଉସ୍ତରୀ", - "75": "ପଞ୍ଚସ୍ତରୀ", - "76": "ଛଅସ୍ତରୀ", - "77": "ସତସ୍ତରୀ", - "78": "ଅଠସ୍ତରୀ", - "79": "ଅଣାଅଶୀ", - "80": "ଅଶୀ", - "81": "ଏକାଅଶୀ", - "82": "ବୟାଅଶୀ", - "83": "ତେୟାଅଶୀ", - "84": "ଚଉରାଅଶୀ", - "85": "ପଞ୍ଚାଅଶୀ", - "86": "ଛୟାଅଶୀ", - "87": "ସତାଅଶୀ", - "88": "ଅଠାଅଶୀ", - "89": "ଅଣାନବେ", - "90": "ନବେ", - "91": "ଏକାନବେ", - "92": "ବୟାନବେ", - "93": "ତେୟାନବେ", - "94": "ଚଉରାନବେ", - "95": "ପଞ୍ଚାନବେ", - "96": "ଛୟାନବେ", - "97": "ସତାନବେ", - "98": "ଅଠାନବେ", - "99": "ଅନେଶତ", - "100": "ଶହେ", - "1000": "ହଜାର", - "100000": "ଲକ୍ଷ", - "10000000": "କୋଟି", - "1000000000": "କୋଟି", -} # Oriya -num_dict["pa"] = { - "0": "ਸਿਫਰ ", - "1": "ਇੱਕ", - "2": "ਦੋ", - "3": "ਤਿੰਨ", - "4": "ਚਾਰ", - "5": "ਪੰਜ", - "6": "ਛੇ", - "7": "ਸੱਤ", - "8": "ਅੱਠ", - "9": "ਨੌਂ", - "10": "ਦੱਸ", - "11": "ਗਿਆਰਾਂ", - "12": "ਬਾਰਾਂ", - "13": "ਤੇਰਾਂ", - "14": "ਚੌਦਾਂ", - "15": "ਪੰਦਰਾਂ", - "16": "ਸੋਲ਼ਾਂ", - "17": "ਸਤਾਰਾਂ", - "18": "ਅਠਾਰਾਂ", - "19": "ਉਨੀ", - "20": "ਵੀਹ", - "21": "ਇੱਕੀ", - "22": "ਬਾਈ", - "23": "ਤੇਈ", - "24": "ਚੌਵੀ", - "25": "ਪੰਝੀ", - "26": "ਛੱਬੀ", - "27": "ਸਤਾਈ", - "28": "ਅਠਾਈ", - "29": "ਉਨੱਤੀ", - "30": "ਤੀਹ", - "31": "ਇਕੱਤੀ", - "32": "ਬੱਤੀ", - "33": "ਤੇਤੀ", - "34": "ਚੌਂਤੀ", - "35": "ਪੈਂਤੀ", - "36": "ਛੱਤੀ", - "37": "ਸੈਂਤੀ", - "38": "ਅਠੱਤੀ", - "39": "ਉਨਤਾਲੀ", - "40": "ਚਾਲੀ", - "41": "ਇਕਤਾਲੀ", - "42": "ਬਤਾਲੀ", - "43": "ਤਰਤਾਲੀ", - "44": "ਚੌਤਾਲੀ", - "45": "ਪੰਜਤਾਲੀ", - "46": "ਛਿਆਲੀ", - "47": "ਸੰਤਾਲੀ", - "48": "ਅੱਠਤਾਲੀ", - "49": "ਉਣਿੰਜਾ", - "50": "ਪੰਜਾਹ", - "51": "ਇਕਵਿੰਜਾ", - "52": "ਬਵਿੰਜਾ", - "53": "ਤਰਵਿੰਜਾ", - "54": "ਚਰਿੰਜਾ", - "55": "ਪਚਵਿੰਜਾ", - "56": "ਛਪਿੰਜਾ", - "57": "ਸਤਵਿੰਜਾ", - "58": "ਅੱਠਵਿੰਜਾ", - "59": "ਉਣਾਠ", - "60": "ਸੱਠ", - "61": "ਇਕਾਠ", - "62": "ਬਾਠ੍ਹ", - "63": "ਤਰੇਠ੍ਹ", - "64": "ਚੌਠ੍ਹ", - "65": "ਪੈਂਠ", - "66": "ਛਿਆਠ", - "67": "ਸਤਾਹਠ", - "68": "ਅੱਠਾਠ", - "69": "ਉਣੱਤਰ", - "70": "ਸੱਤਰ", - "71": "ਇਕ੍ਹੱਤਰ", - "72": "ਬਹੱਤਰ", - "73": "ਤਹੱਤਰ", - "74": "ਚੌਹੱਤਰ", - "75": "ਪੰਜੱਤਰ", - "76": "ਛਿਹੱਤਰ", - "77": "ਸਤੱਤਰ", - "78": "ਅਠੱਤਰ", - "79": "ਉਣਾਸੀ", - "80": "ਅੱਸੀ", - "81": "ਇਕਾਸੀ", - "82": "ਬਿਆਸੀ", - "83": "ਤਰਾਸੀ", - "84": "ਚਰਾਸੀ", - "85": "ਪੰਜਾਸੀ", - "86": "ਛਿਆਸੀ", - "87": "ਸਤਾਸੀ", - "88": "ਅਠਾਸੀ", - "89": "ਉਣਾਨਵੇਂ", - "90": "ਨੱਬੇ", - "91": "ਇਕਾਨਵੇਂ", - "92": "ਬਿਆਨਵੇਂ", - "93": "ਤਰਾਨਵੇਂ", - "94": "ਚਰਾਨਵੇਂ", - "95": "ਪਚਾਨਵੇਂ", - "96": "ਛਿਆਨਵੇਂ", - "97": "ਸਤਾਨਵੇਂ", - "98": "ਅਠਾਨਵੇਂ", - "99": "ਨਿੜਾਨਵੇਂ", - "100": "ਸੌ", - "1000": "ਹਜਾਰ", - "100000": "ਲੱਖ", - "10000000": "ਕਰੋੜ", - "1000000000": "ਅਰਬ", -} # Punjabi - -# --------------------------- num_to_word.py ------------------------------ -""" -Method to convert Numbers to Words -for indian languages - -Use cases:- -1) Speech recognition pre-processing -2) Language modeling Data pre-processing - -------------------------- -check indic_numbers.py to add support -for any indian language -""" - - -def language_specific_exception(words, lang, combiner): - """ - Language Specific Exception will come here - """ - - def occurs_at_end(piece): - return words[-len(piece) :] == piece - - if lang == "mr": - words = words.replace("एक" + combiner + "शे", "शंभर") - elif lang == "gu": - words = words.replace("બે" + combiner + "સો", "બસ્સો") - elif lang == "te": - exception_dict = { - "1": "ఒక", - "100": "వంద", - "100+": "వందలు", - "1000": "వెయ్యి", - "1000+": "వేలు", - "100000": "లక్ష", - "100000+": "లక్షలు", - "10000000": "కోటి", - "10000000+": "కోట్లు", - } - - test_case = ["100", "1000", "100000", "10000000"] - for test in test_case: - test_word = num_dict["te"][test] - match = num_dict["te"]["1"] + combiner + test_word - # for numbers like : 100, 1000, 100000 - if words == match: - return 
exception_dict[test] - # for numbers like : 200, 4000, 800000 - elif occurs_at_end(test_word): - words = words.replace(test_word, exception_dict[test + "+"]) - # for numbers like : 105, 1076, 123993 - elif not occurs_at_end(match): - replacement = exception_dict["1"] + combiner + exception_dict[test] - words = words.replace(match, replacement) - - # Exception case for 101...199 - special_case = "ఒక" + combiner + "వంద" - words = words.replace(special_case, "నూట") - elif lang == "kn": - # special case for 100 - if words == ("ಒಂದು" + combiner + "ನೂರ"): - return "ನೂರು" - exception_dict = { - "ನೂರ": "ನೂರು", - "ಸಾವಿರದ": "ಸಾವಿರ", - "ಲಕ್ಷದ": "ಲಕ್ಷ", - "ಕೋಟಿಯ": "ಕೋಟಿ", - } - for expt in exception_dict: - if occurs_at_end(expt): - words = words.replace(expt, exception_dict[expt]) - return words - - -def num_to_word(num, lang, separator=", ", combiner=" "): - """ - Main Method - :param num: Number digits from any indian language - :param lang: Language Code from supported Language - :param separator: Separator character i.e. separator = '-' --> 'two hundred-sixty' - :param combiner: combine number with position i.e. combiner = '-' --> 'two-hundred sixty' - :return: UTF-8 String of numbers in words - """ - lang = lang.lower() - num = str(num) - - # Load dictionary according to language code - assert lang in supported_lang, "Language not supported" - num_dic = num_dict[lang] - - # dash default combiner for english-india - if (lang == "en") & (combiner == " "): - combiner = "-" - - # Remove punctuations from numbers - num = str(num).replace(",", "").replace(" ", "") - - # return word as it is if not number - if not num.isdecimal(): - return num - - # Replace native language numbers with english digits - for language in supported_lang: - for num_index in range(10): - num = num.replace(all_num[language][num_index], all_num["en"][num_index]) - - # Assert that input contains only integer number - for digit in num: - assert digit in all_num["en"], "Give proper input" - - # Process - # For Number longer than 9 digits - def all_two_digit(digits_2): - if len(digits_2) <= 1: # Provided only one/zero digit - return num_dic.get(digits_2, "") - elif digits_2 == "00": # Two Zero provided - return num_dic["0"] + separator + num_dic["0"] - elif digits_2[0] == "0": # First digit is zero - return num_dic["0"] + separator + num_dic[digits_2[1]] - else: # Both digit provided - return num_dic[digits_2] - - # For Number less than 9 digits - def two_digit(digits_2): - digits_2 = digits_2.lstrip("0") - if len(digits_2) != 0: - return num_dic[digits_2] - else: - return "" - - def all_digit(digits): - digits = digits.lstrip("0") - digit_len = len(digits) - if digit_len > 3: - num_of_digits_to_process = (digit_len % 2) + 1 - process_digits = digits[:num_of_digits_to_process] - base = str(10 ** (int(digit_len / 2) * 2 - 1)) - remain_digits = digits[num_of_digits_to_process:] - return ( - num_dic[process_digits] - + combiner - + num_dic[base] - + separator - + all_digit(remain_digits) - ) - elif len(digits) == 3: - return ( - num_dic[digits[:1]] - + combiner - + num_dic["100"] - + separator - + two_digit(digits[1:]) - ) - else: - return two_digit(digits) - - num = num.lstrip("0") - full_digit_len = len(num) - - if full_digit_len == 0: - output = num_dic["0"] - elif full_digit_len <= 9: - output = all_digit(num) - else: - iteration = round(full_digit_len / 2) - output = all_two_digit(num[:2]) # First to digit - for i in range(1, iteration): - output = ( - output + separator + all_two_digit(num[i * 2 : (i + 1) * 2]) - ) # Next two 
digit pairs - remaining_digits = num[iteration * 2 :] - if not all_two_digit(remaining_digits) == "": - output = ( - output + separator + all_two_digit(remaining_digits) - ) # remaining Last one/two digits - - output = output.strip(separator) - - output = language_specific_exception(output, lang, combiner) - - return output - - -# --------------------------------- num_to_word_on_a_sent --------------------------------- - - -def is_digit(word, digit_pattern): - return re.search(digit_pattern, word) - - -def remove_punct(sent): - clean = re.sub("[%s]" % re.escape(string.punctuation), " ", sent) - return " ".join([word for word in clean.split() if word]) - - -def normalize_nums(text, lang): - """ - text: str (eg) - lang: lang code ['en', 'hi'] - - returns: str - (eg) - """ - - if lang in supported_lang: - text = text.replace('-',' - ') # space separate hyphen - words = text.split() - lang_digits = [str(i) for i in range(0, 10)] - - digit_pattern = "[" + "".join(lang_digits) + "]" - num_indices = [ - ind for ind, word in enumerate(words) if is_digit(word, digit_pattern) - ] - - words_up = [ - num_to_word(word, lang, separator=" ", combiner=" ") - if ind in num_indices - else word - for ind, word in enumerate(words) - ] - return " ".join(words_up) - else: - return text - - -if __name__ == "__main__": - print(normalize_nums("रीटा के पास 16 बिल्लियाँ हैं।", "hi")) diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/setup.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/setup.py deleted file mode 100644 index 9d2c73345b8406195aaa6327cb3148bb92b65190..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/setup.py +++ /dev/null @@ -1,55 +0,0 @@ -from setuptools import setup, find_packages - -with open("README.md", "r") as f: - long_description = f.read() - -setup( - name="vakyansh-tts", - version="0.0.5", - description="Text to speech for Indic languages", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/Open-Speech-EkStep/vakyansh-tts.git", - keywords="nlp, tts, Indic languages, deep learning, text to speech", - # package_dir={'': 'src'}, - # packages=find_packages(where='src'), - packages=["tts_infer"], - python_requires=">=3.7, <4", - install_requires=[ - "Cython==0.29.24", - "layers==0.1.5", - "librosa==0.8.1", - "matplotlib==3.3.4", - "numpy==1.20.2", - "scipy==1.5.4", - "tensorboardX==2.4", - "tensorboard==2.7.0", - "tqdm==4.62.3", - "fastapi==0.70.0", - "uvicorn==0.15.0", - "gradio==2.5.2", - "wavio==0.0.4", - "pydload==1.0.9", - "mosestokenizer==1.2.1", - "indic-nlp-library==0.81" - ], - classifiers=[ - # How mature is this project? Common values are - # 3 - Alpha - # 4 - Beta - # 5 - Production/Stable - "Development Status :: 3 - Alpha", - # Indicate who your project is intended for - "Intended Audience :: Developers", - "Intended Audience :: Education", - "Intended Audience :: Science/Research", - "Topic :: Scientific/Engineering :: Artificial Intelligence", - "Topic :: Text Processing :: Linguistic", - # Pick your license as you wish (should match "license" above) - "License :: OSI Approved :: MIT License", - # Specify the Python versions you support here. In particular, ensure - # that you indicate whether you support Python 2, Python 3 or both. 
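# --- Editorial aside (not in the deleted file): every install_requires entry
# above is pinned exactly (==), so this setup.py reproduces one known-good
# environment rather than declaring a compatible range; a reusable library
# would more typically use >= lower bounds. ---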
- "Programming Language :: Python :: 3.7", - ], - include_package_data=True, -) diff --git a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/init.py b/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/init.py deleted file mode 100644 index 39dd83dbd55475d562a3f54d951cb822800d2e0f..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/src/glow_tts/init.py +++ /dev/null @@ -1,79 +0,0 @@ -import os -import json -import argparse -import math -import torch -from torch import nn, optim -from torch.nn import functional as F -from torch.utils.data import DataLoader - -from data_utils import TextMelLoader, TextMelCollate -import models -import commons -import utils - - -class FlowGenerator_DDI(models.FlowGenerator): - """A helper for Data-dependent Initialization""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - for f in self.decoder.flows: - if getattr(f, "set_ddi", False): - f.set_ddi(True) - - -def main(): - hps = utils.get_hparams() - logger = utils.get_logger(hps.log_dir) - logger.info(hps) - utils.check_git_hash(hps.log_dir) - - torch.manual_seed(hps.train.seed) - - train_dataset = TextMelLoader(hps.data.training_files, hps.data) - collate_fn = TextMelCollate(1) - train_loader = DataLoader( - train_dataset, - num_workers=8, - shuffle=True, - batch_size=hps.train.batch_size, - pin_memory=True, - drop_last=True, - collate_fn=collate_fn, - ) - symbols = hps.data.punc + hps.data.chars - generator = FlowGenerator_DDI( - len(symbols) + getattr(hps.data, "add_blank", False), - out_channels=hps.data.n_mel_channels, - **hps.model - ).cuda() - optimizer_g = commons.Adam( - generator.parameters(), - scheduler=hps.train.scheduler, - dim_model=hps.model.hidden_channels, - warmup_steps=hps.train.warmup_steps, - lr=hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - - generator.train() - for batch_idx, (x, x_lengths, y, y_lengths) in enumerate(train_loader): - x, x_lengths = x.cuda(), x_lengths.cuda() - y, y_lengths = y.cuda(), y_lengths.cuda() - - _ = generator(x, x_lengths, y, y_lengths, gen=False) - break - - utils.save_checkpoint( - generator, - optimizer_g, - hps.train.learning_rate, - 0, - os.path.join(hps.model_dir, "ddi_G.pth"), - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/num_to_word_on_sent.py b/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/num_to_word_on_sent.py deleted file mode 100644 index ce878a8c3ee6f5ef629abeaee418d5959f7179ed..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/utils/inference/num_to_word_on_sent.py +++ /dev/null @@ -1,1314 +0,0 @@ -import re -import string - -# ----------------------------- indic_num.py ----------------------------- -supported_lang = {"en", "hi", "gu", "mr", "bn", "te", "ta", "kn", "or", "pa"} -# supported_lang = {'eng', 'hin', 'guj', 'mar', 'ben', 'tel', 'tam', 'kan', 'ori', 'pan'} # Three alphabet lang code - -all_num = { - "en": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], - "hi": ["०", "१", "२", "३", "४", "५", "६", "७", "८", "९"], - "gu": ["૦", "૧", "૨", "૩", "૪", "૫", "૬", "૭", "૮", "૯"], - "mr": ["०", "१", "२", "३", "४", "५", "६", "७", "८", "९"], - "bn": ["০", "১", "২", "৩", "৪", "৫", "৬", "৭", "৮", "৯"], - "te": ["౦", "౧", "౨", "౩", "౪", "౫", "౬", "౭", "౮", "౯"], - "ta": ["0", "௧", "௨", "௩", "௪", "௫", "௬", "௭", "௮", "௯", "௰"], - "kn": ["೦", "೧", "೨", "೩", "೪", "೫", "೬", "೭", "೮", "೯"], - "or": ["୦", "୧", 
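# --- Editorial aside (not in the deleted file): this file is a near-verbatim
# copy, in the Tamil repo, of the num_to_word_on_sent.py deleted above. Its
# grouping follows the Indian numbering system: hundreds first, then two
# digits at a time (thousand, lac, crore). Tracing the code above,
# num_to_word(1234, 'en') should yield "one-thousand, two-hundred,
# thirty-four", and normalize_nums rewrites only whitespace-separated digit
# tokens, so in the Hindi demo above "16" becomes "सोलह" while the
# surrounding words are left untouched.
# --- end aside; the digit tables continue below ---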
"୨", "୩", "୪", "୫", "୬", "୭", "୮", "୯"], - "pa": ["੦", "੧", "੨", "੩", "੪", "੫", "੬", "੭", "੮", "੯"], -} - -num_dict = dict() -num_dict["en"] = { - "0": "zero", - "1": "one", - "2": "two", - "3": "three", - "4": "four", - "5": "five", - "6": "six", - "7": "seven", - "8": "eight", - "9": "nine", - "10": "ten", - "11": "eleven", - "12": "twelve", - "13": "thirteen", - "14": "fourteen", - "15": "fifteen", - "16": "sixteen", - "17": "seventeen", - "18": "eighteen", - "19": "nineteen", - "20": "twenty", - "21": "twenty-one", - "22": "twenty-two", - "23": "twenty-three", - "24": "twenty-four", - "25": "twenty-five", - "26": "twenty-six", - "27": "twenty-seven", - "28": "twenty-eight", - "29": "twenty-nine", - "30": "thirty", - "31": "thirty-one", - "32": "thirty-two", - "33": "thirty-three", - "34": "thirty-four", - "35": "thirty-five", - "36": "thirty-six", - "37": "thirty-seven", - "38": "thirty-eight", - "39": "thirty-nine", - "40": "forty", - "41": "forty-one", - "42": "forty-two", - "43": "forty-three", - "44": "forty-four", - "45": "forty-five", - "46": "forty-six", - "47": "forty-seven", - "48": "forty-eight", - "49": "forty-nine", - "50": "fifty", - "51": "fifty-one", - "52": "fifty-two", - "53": "fifty-three", - "54": "fifty-four", - "55": "fifty-five", - "56": "fifty-six", - "57": "fifty-seven", - "58": "fifty-eight", - "59": "fifty-nine", - "60": "sixty", - "61": "sixty-one", - "62": "sixty-two", - "63": "sixty-three", - "64": "sixty-four", - "65": "sixty-five", - "66": "sixty-six", - "67": "sixty-seven", - "68": "sixty-eight", - "69": "sixty-nine", - "70": "seventy", - "71": "seventy-one", - "72": "seventy-two", - "73": "seventy-three", - "74": "seventy-four", - "75": "seventy-five", - "76": "seventy-six", - "77": "seventy-seven", - "78": "seventy-eight", - "79": "seventy-nine", - "80": "eighty", - "81": "eighty-one", - "82": "eighty-two", - "83": "eighty-three", - "84": "eighty-four", - "85": "eighty-five", - "86": "eighty-six", - "87": "eighty-seven", - "88": "eighty-eight", - "89": "eighty-nine", - "90": "ninety", - "91": "ninety-one", - "92": "ninety-two", - "93": "ninety-three", - "94": "ninety-four", - "95": "ninety-five", - "96": "ninety-six", - "97": "ninety-seven", - "98": "ninety-eight", - "99": "ninety-nine", - "100": "hundred", - "1000": "thousand", - "100000": "lac", - "10000000": "crore", - "1000000000": "arab", -} # English-India -num_dict["hi"] = { - "0": "शून्य", - "1": "एक", - "2": "दो", - "3": "तीन", - "4": "चार", - "5": "पाँच", - "6": "छः", - "7": "सात", - "8": "आठ", - "9": "नौ", - "10": "दस", - "11": "ग्यारह", - "12": "बारह", - "13": "तेरह", - "14": "चौदह", - "15": "पंद्रह", - "16": "सोलह", - "17": "सत्रह", - "18": "अट्ठारह", - "19": "उन्नीस", - "20": "बीस", - "21": "इक्कीस", - "22": "बाईस", - "23": "तेईस", - "24": "चौबिस", - "25": "पच्चीस", - "26": "छब्बीस", - "27": "सत्ताईस", - "28": "अट्ठाईस", - "29": "उनतीस", - "30": "तीस", - "31": "इकतीस", - "32": "बत्तीस", - "33": "तैंतीस", - "34": "चौंतीस", - "35": "पैंतीस", - "36": "छत्तीस", - "37": "सैंतीस", - "38": "अड़तीस", - "39": "उनतालीस", - "40": "चालीस", - "41": "इकतालीस", - "42": "बयालीस", - "43": "तैंतालीस", - "44": "चौंतालीस", - "45": "पैंतालीस", - "46": "छियालीस", - "47": "सैंतालीस", - "48": "अड़तालीस", - "49": "उनचास", - "50": "पचास", - "51": "इक्यावन​", - "52": "बावन", - "53": "तिरेपन", - "54": "चौवन", - "55": "पचपन", - "56": "छप्पन", - "57": "सत्तावन", - "58": "अट्ठावन", - "59": "उनसठ", - "60": "साठ", - "61": "इकसठ", - "62": "बासठ", - "63": "तिरेसठ", - "64": "चौंसठ", - "65": "पैंसठ", - "66": "छयासठ", - "67": 
"सरसठ​", - "68": "अड़सठ", - "69": "उनहत्तर", - "70": "सत्तर", - "71": "इकहत्तर", - "72": "बहत्तर", - "73": "तिहत्तर", - "74": "चौहत्तर", - "75": "पचहत्तर", - "76": "छिहत्तर", - "77": "सतहत्तर", - "78": "अठहत्तर", - "79": "उन्यासी", - "80": "अस्सी", - "81": "इक्यासी", - "82": "बयासी", - "83": "तिरासी", - "84": "चौरासी", - "85": "पचासी", - "86": "छियासी", - "87": "सत्तासी", - "88": "अठासी", - "89": "नवासी", - "90": "नब्बे", - "91": "इक्यानवे", - "92": "बानवे", - "93": "तिरानवे", - "94": "चौरानवे", - "95": "पचानवे", - "96": "छियानवे", - "97": "सत्तानवे", - "98": "अट्ठानवे", - "99": "निन्यानवे", - "100": "सौ", - "1000": "हज़ार", - "100000": "लाख", - "10000000": "करोड़", - "1000000000": "अरब", -} # Hindi -num_dict["gu"] = { - "0": "શૂન્ય", - "1": "એક", - "2": "બે", - "3": "ત્રણ", - "4": "ચાર", - "5": "પાંચ", - "6": "છ", - "7": "સાત", - "8": "આઠ", - "9": "નવ", - "10": "દસ", - "11": "અગિયાર", - "12": "બાર", - "13": "તેર", - "14": "ચૌદ", - "15": "પંદર", - "16": "સોળ", - "17": "સત્તર", - "18": "અઢાર", - "19": "ઓગણિસ", - "20": "વીસ", - "21": "એકવીસ", - "22": "બાવીસ", - "23": "તેવીસ", - "24": "ચોવીસ", - "25": "પચ્ચીસ", - "26": "છવીસ", - "27": "સત્તાવીસ", - "28": "અઠ્ઠાવીસ", - "29": "ઓગણત્રીસ", - "30": "ત્રીસ", - "31": "એકત્રીસ", - "32": "બત્રીસ", - "33": "તેત્રીસ", - "34": "ચોત્રીસ", - "35": "પાંત્રીસ", - "36": "છત્રીસ", - "37": "સડત્રીસ", - "38": "અડત્રીસ", - "39": "ઓગણચાલીસ", - "40": "ચાલીસ", - "41": "એકતાલીસ", - "42": "બેતાલીસ", - "43": "ત્રેતાલીસ", - "44": "ચુંમાલીસ", - "45": "પિસ્તાલીસ", - "46": "છેતાલીસ", - "47": "સુડતાલીસ", - "48": "અડતાલીસ", - "49": "ઓગણપચાસ", - "50": "પચાસ", - "51": "એકાવન", - "52": "બાવન", - "53": "ત્રેપન", - "54": "ચોપન", - "55": "પંચાવન", - "56": "છપ્પન", - "57": "સત્તાવન", - "58": "અઠ્ઠાવન", - "59": "ઓગણસાઠ", - "60": "સાઈઠ", - "61": "એકસઠ", - "62": "બાસઠ", - "63": "ત્રેસઠ", - "64": "ચોસઠ", - "65": "પાંસઠ", - "66": "છાસઠ", - "67": "સડસઠ", - "68": "અડસઠ", - "69": "અગણોસિત્તેર", - "70": "સિત્તેર", - "71": "એકોતેર", - "72": "બોતેર", - "73": "તોતેર", - "74": "ચુમોતેર", - "75": "પંચોતેર", - "76": "છોતેર", - "77": "સિત્યોતેર", - "78": "ઇઠ્યોતેર", - "79": "ઓગણાએંસી", - "80": "એંસી", - "81": "એક્યાસી", - "82": "બ્યાસી", - "83": "ત્યાસી", - "84": "ચોર્યાસી", - "85": "પંચાસી", - "86": "છ્યાસી", - "87": "સિત્યાસી", - "88": "ઈઠ્યાસી", - "89": "નેવ્યાસી", - "90": "નેવું", - "91": "એકાણું", - "92": "બાણું", - "93": "ત્રાણું", - "94": "ચોરાણું", - "95": "પંચાણું", - "96": "છન્નું", - "97": "સત્તાણું", - "98": "અઠ્ઠાણું", - "99": "નવ્વાણું", - "100": "સો", - "1000": "હજાર", - "100000": "લાખ", - "1000000": "દસ લાખ", - "10000000": "કરોડ઼", -} # Gujarati -num_dict["mr"] = { - "0": "शून्य", - "1": "एक", - "2": "दोन", - "3": "तीन", - "4": "चार", - "5": "पाच", - "6": "सहा", - "7": "सात", - "8": "आठ", - "9": "नऊ", - "10": "दहा", - "11": "अकरा", - "12": "बारा", - "13": "तेरा", - "14": "चौदा", - "15": "पंधरा", - "16": "सोळा", - "17": "सतरा", - "18": "अठरा", - "19": "एकोणीस", - "20": "वीस", - "21": "एकवीस", - "22": "बावीस", - "23": "तेवीस", - "24": "चोवीस", - "25": "पंचवीस", - "26": "सव्वीस", - "27": "सत्तावीस", - "28": "अठ्ठावीस", - "29": "एकोणतीस", - "30": "तीस", - "31": "एकतीस", - "32": "बत्तीस", - "33": "तेहेतीस", - "34": "चौतीस", - "35": "पस्तीस", - "36": "छत्तीस", - "37": "सदतीस", - "38": "अडतीस", - "39": "एकोणचाळीस", - "40": "चाळीस", - "41": "एक्केचाळीस", - "42": "बेचाळीस", - "43": "त्रेचाळीस", - "44": "चव्वेचाळीस", - "45": "पंचेचाळीस", - "46": "सेहेचाळीस", - "47": "सत्तेचाळीस", - "48": "अठ्ठेचाळीस", - "49": "एकोणपन्नास", - "50": "पन्नास", - "51": "एक्कावन्न", - "52": "बावन्न", - "53": 
"त्रेपन्न", - "54": "चोपन्न", - "55": "पंचावन्न", - "56": "छप्पन्न", - "57": "सत्तावन्न", - "58": "अठ्ठावन्न", - "59": "एकोणसाठ", - "60": "साठ", - "61": "एकसष्ठ", - "62": "बासष्ठ", - "63": "त्रेसष्ठ", - "64": "चौसष्ठ", - "65": "पासष्ठ", - "66": "सहासष्ठ", - "67": "सदुसष्ठ", - "68": "अडुसष्ठ", - "69": "एकोणसत्तर", - "70": "सत्तर", - "71": "एक्काहत्तर", - "72": "बाहत्तर", - "73": "त्र्याहत्तर", - "74": "चौर्‍याहत्तर", - "75": "पंच्याहत्तर", - "76": "शहात्तर", - "77": "सत्याहत्तर", - "78": "अठ्ठ्याहत्तर", - "79": "एकोण ऐंशी", - "80": "ऐंशी", - "81": "एक्क्याऐंशी", - "82": "ब्याऐंशी", - "83": "त्र्याऐंशी", - "84": "चौऱ्याऐंशी", - "85": "पंच्याऐंशी", - "86": "शहाऐंशी", - "87": "सत्त्याऐंशी", - "88": "अठ्ठ्याऐंशी", - "89": "एकोणनव्वद", - "90": "नव्वद", - "91": "एक्क्याण्णव", - "92": "ब्याण्णव", - "93": "त्र्याण्णव", - "94": "चौऱ्याण्णव", - "95": "पंच्याण्णव", - "96": "शहाण्णव", - "97": "सत्त्याण्णव", - "98": "अठ्ठ्याण्णव", - "99": "नव्व्याण्णव", - "100": "शे", - "1000": "हजार", - "100000": "लाख", - "10000000": "कोटी", - "1000000000": "अब्ज", -} # Marathi -num_dict["bn"] = { - "0": "শূন্য", - "1": "এক", - "2": "দুই", - "3": "তিন", - "4": "চার", - "5": "পাঁচ", - "6": "ছয়", - "7": "সাত", - "8": "আট", - "9": "নয়", - "10": "দশ", - "11": "এগার", - "12": "বার", - "13": "তের", - "14": "চৌদ্দ", - "15": "পনের", - "16": "ষোল", - "17": "সতের", - "18": "আঠার", - "19": "ঊনিশ", - "20": "বিশ", - "21": "একুশ", - "22": "বাইশ", - "23": "তেইশ", - "24": "চব্বিশ", - "25": "পঁচিশ", - "26": "ছাব্বিশ", - "27": "সাতাশ", - "28": "আঠাশ", - "29": "ঊনত্রিশ", - "30": "ত্রিশ", - "31": "একত্রিশ", - "32": "বত্রিশ", - "33": "তেত্রিশ", - "34": "চৌত্রিশ", - "35": "পঁয়ত্রিশ", - "36": "ছত্রিশ", - "37": "সাঁইত্রিশ", - "38": "আটত্রিশ", - "39": "ঊনচল্লিশ", - "40": "চল্লিশ", - "41": "একচল্লিশ", - "42": "বিয়াল্লিশ", - "43": "তেতাল্লিশ", - "44": "চুয়াল্লিশ", - "45": "পঁয়তাল্লিশ", - "46": "ছেচল্লিশ", - "47": "সাতচল্লিশ", - "48": "আটচল্লিশ", - "49": "ঊনপঞ্চাশ", - "50": "পঞ্চাশ", - "51": "একান্ন", - "52": "বায়ান্ন", - "53": "তিপ্পান্ন", - "54": "চুয়ান্ন", - "55": "পঞ্চান্ন", - "56": "ছাপ্পান্ন", - "57": "সাতান্ন", - "58": "আটান্ন", - "59": "ঊনষাট", - "60": "ষাট", - "61": "একষট্টি", - "62": "বাষট্টি", - "63": "তেষট্টি", - "64": "চৌষট্টি", - "65": "পঁয়ষট্টি", - "66": "ছেষট্টি", - "67": "সাতষট্টি", - "68": "আটষট্টি", - "69": "ঊনসত্তর", - "70": "সত্তর", - "71": "একাত্তর", - "72": "বাহাত্তর", - "73": "তিয়াত্তর", - "74": "চুয়াত্তর", - "75": "পঁচাত্তর", - "76": "ছিয়াত্তর", - "77": "সাতাত্তর", - "78": "আটাত্তর", - "79": "ঊনআশি", - "80": "আশি", - "81": "একাশি", - "82": "বিরাশি", - "83": "তিরাশি", - "84": "চুরাশি", - "85": "পঁচাশি", - "86": "ছিয়াশি", - "87": "সাতাশি", - "88": "আটাশি", - "89": "ঊননব্বই", - "90": "নব্বই", - "91": "একানব্বই", - "92": "বিরানব্বই", - "93": "তিরানব্বই", - "94": "চুরানব্বই", - "95": "পঁচানব্বই", - "96": "ছিয়ানব্বই", - "97": "সাতানব্বই", - "98": "আটানব্বই", - "99": "নিরানব্বই", - "100": "শো", - "1000": "হাজার", - "100000": "লাখ", - "10000000": "কোটি", - "1000000000": "একশ’ কোটি", -} # Bengali -num_dict["te"] = { - "0": "సున్నా", - "1": "ఒకటి", - "2": "రెండు", - "3": "మూడు", - "4": "నాలుగు", - "5": "ఐదు", - "6": "ఆరు", - "7": "ఏడు", - "8": "ఎనిమిది", - "9": "తొమ్మిది", - "10": "పది", - "11": "పదకొండు", - "12": "పన్నెండు", - "13": "పదమూడు", - "14": "పద్నాలుగు", - "15": "పదిహేను", - "16": "పదహారు", - "17": "పదిహేడు", - "18": "పద్దెనిమిది", - "19": "పందొమ్మిది", - "20": "ఇరవై", - "21": "ఇరవై ఒకటి", - "22": "ఇరవై రెండు", - "23": "ఇరవై మూడు", - "24": "ఇరవై నాలుగు", - "25": "ఇరవై ఐదు", - "26": "ఇరవై ఆరు", - "27": "ఇరవై 
ఏడు", - "28": "ఇరవై ఎనిమిది", - "29": "ఇరవై తొమ్మిది", - "30": "ముప్పై", - "31": "ముప్పై ఒకటి", - "32": "ముప్పై రెండు", - "33": "ముప్పై మూడు", - "34": "ముప్పై నాలుగు", - "35": "ముప్పై ఐదు", - "36": "ముప్పై ఆరు", - "37": "ముప్పై ఏడు", - "38": "ముప్పై ఎనిమిది", - "39": "ముప్పై తొమ్మిది", - "40": "నలభై", - "41": "నలభై ఒకటి", - "42": "నలభై రెండు", - "43": "నలభై మూడు", - "44": "నలభై నాలుగు", - "45": "నలభై ఐదు", - "46": "నలభై ఆరు", - "47": "నలభై ఏడు", - "48": "నలభై ఎనిమిది", - "49": "నలభై తొమ్మిది", - "50": "యాభై", - "51": "యాభై ఒకటి", - "52": "యాభై రెండు", - "53": "యాభై మూడు", - "54": "యాభై నాలుగు", - "55": "యాభై ఐదు", - "56": "యాభై ఆరు", - "57": "యాభై ఏడు", - "58": "యాభై ఎనిమిది", - "59": "యాభై తొమ్మిది", - "60": "అరవై", - "61": "అరవై ఒకటి", - "62": "అరవై రెండు", - "63": "అరవై మూడు", - "64": "అరవై నాలుగు", - "65": "అరవై ఐదు", - "66": "అరవై ఆరు", - "67": "అరవై ఏడు", - "68": "అరవై ఎనిమిది", - "69": "అరవై తొమ్మిది", - "70": "డెబ్బై", - "71": "డెబ్బై ఒకటి", - "72": "డెబ్బై రెండు", - "73": "డెబ్బై మూడు", - "74": "డెబ్బై నాలుగు", - "75": "డెబ్బై ఐదు", - "76": "డెబ్బై ఆరు", - "77": "డెబ్బై ఏడు", - "78": "డెబ్బై ఎనిమిది", - "79": "డెబ్బై తొమ్మిది", - "80": "ఎనభై", - "81": "ఎనభై ఒకటి", - "82": "ఎనభై రెండు", - "83": "ఎనభై మూడు", - "84": "ఎనభై నాలుగు", - "85": "ఎనభై ఐదు", - "86": "ఎనభై ఆరు", - "87": "ఎనభై ఏడు", - "88": "ఎనభై ఎనిమిది", - "89": "ఎనభై తొమ్మిది", - "90": "తొంభై", - "91": "తొంభై ఒకటి", - "92": "తొంభై రెండు", - "93": "తొంభై మూడు", - "94": "తొంభై నాలుగు", - "95": "తొంభై ఐదు", - "96": "తొంభై ఆరు", - "97": "తొంభై ఏడు", - "98": "తొంభై ఎనిమిది", - "99": "తొంభై తొమ్మిది", - "100": "వందల", - "1000": "వేల", - "100000": "లక్షల", - "10000000": "కోట్ల", - "1000000000": "బిలియన్", -} # Telugu -num_dict["ta"] = { - "0": "பூஜ்ஜியம்", - "1": "ஒன்று", - "2": "இரண்டு", - "3": "மூன்று", - "4": "நான்கு", - "5": "ஐந்து", - "6": "ஆறு", - "7": "ஏழு", - "8": "எட்டு", - "9": "ஒன்பது", - "10": "பத்து", - "11": "பதினொன்று", - "12": "பன்னிரண்டு", - "13": "பதிமூன்று", - "14": "பதினான்கு", - "15": "பதினைந்து", - "16": "பதினாறு", - "17": "பதினேழு", - "18": "பதினெட்டு", - "19": "பத்தொன்பது", - "20": "இருபது", - "21": "இருபது ஒன்று", - "22": "இருபத்து இரண்டு", - "23": "இருபத்து மூன்று", - "24": "இருபத்து நான்கு", - "25": "இருபத்து ஐந்து", - "26": "இருபத்து ஆறு", - "27": "இருபத்து ஏழு", - "28": "இருபத்து எட்டு", - "29": "இருபத்து ஒன்பது", - "30": "முப்பது", - "31": "முப்பத்து ஒன்று", - "32": "முப்பத்து இரண்டு", - "33": "முப்பத்து மூன்று", - "34": "முப்பத்து நான்கு", - "35": "முப்பத்து ஐந்து", - "36": "முப்பத்து ஆறு", - "37": "முப்பத்து ஏழு", - "38": "முப்பத்து எட்டு", - "39": "முப்பத்து ஒன்பது", - "40": "நாற்பது", - "41": "நாற்பத்து ஒன்று", - "42": "நாற்பத்து இரண்டு", - "43": "நாற்பத்து மூன்று", - "44": "நாற்பத்து நான்கு", - "45": "நாற்பத்து ஐந்து", - "46": "நாற்பத்து ஆறு", - "47": " நாற்பத்து ஏழு", - "48": "நாற்பத்து எட்டு", - "49": "நாற்பத்து ஒன்பது", - "50": "ஐம்பது", - "51": "ஐம்பத்து ஒன்று", - "52": "ஐம்பத்து இரண்டு", - "53": "ஐம்பத்து மூன்று", - "54": "ஐம்பத்து நான்கு", - "55": "ஐம்பத்து ஐந்து", - "56": "ஐம்பத்து ஆறு", - "57": "ஐம்பத்து ஏழு", - "58": "ஐம்பத்து எட்டு", - "59": "ஐம்பத்து ஒன்பது", - "60": "அறுபது", - "61": "அறுபத்து ஒன்று", - "62": "அறுபத்து இரண்டு", - "63": "அறுபத்து மூன்று", - "64": "அறுபத்து நான்கு", - "65": "அறுபத்து ஐந்து", - "66": "அறுபத்து ஆறு", - "67": "அறுபத்து ஏழு", - "68": "அறுபத்து எட்டு", - "69": "அறுபத்து ஒன்பது", - "70": "எழுபது", - "71": "எழுபத்தி ஒன்று", - "72": "எழுபத்தி இரண்டு", - "73": "எழுபத்தி முச்சக்கர", - "74": "எழுபத்தி நான்கு", - "75": "எழுபத்தி ஐந்து", - "76": "எழுபத்தி ஆறு", - 
"77": "எழுபத்தி ஏழு", - "78": "எழுபத்தி எட்டு", - "79": "எழுபத்தி ஒன்பது", - "80": "எண்பது", - "81": "எண்பத்தியொன்று", - "82": "எண்பத்திரண்டு", - "83": "எண்பத்திமூன்று", - "84": "என்பதினான்கு", - "85": "என்பதினைந்து", - "86": "எண்பத்திஆறு", - "87": "எண்பத்திஏழு", - "88": "எண்பத்தியெட்டு", - "89": "எண்பத்தியொன்பது", - "90": "தொன்னூறு", - "91": "தொண்ணூற்றியொன்று", - "92": "தொண்ணூற்றிரண்டு", - "93": "தொண்ணூற்றிமூன்று", - "94": "தொண்ணூற்றிநான்கு", - "95": "தொண்ணூற்றிஐந்து", - "96": "தொண்ணூற்றியாறு", - "97": "தொண்ணூற்றியேழு", - "98": "தொண்ணூற்றியெட்டு", - "99": "தொண்ணூற்றிஒன்பது", - "100": "நூறு", - "1000": "ஆயிரம்", - "100000": "இலட்சம்", - "10000000": "கோடி", - "1000000000": "பில்லியன்", -} # Tamil -num_dict["kn"] = { - "0": "ಸೊನ್ನೆ", - "1": "ಒಂದು", - "2": "ಎರಡು", - "3": "ಮೂರು", - "4": "ನಾಲ್ಕು", - "5": "ಅಯ್ದು", - "6": "ಆರು", - "7": "ಏಳು", - "8": "ಎಂಟು", - "9": "ಒಂಬತ್ತು", - "10": "ಹತ್ತು", - "11": "ಹನ್ನೊಂದು", - "12": "ಹನ್ನೆರಡು", - "13": "ಹದಿಮೂರು", - "14": "ಹದಿನಾಲ್ಕು", - "15": "ಹದಿನೈದು", - "16": "ಹದಿನಾರು", - "17": "ಹದಿನೇಳು", - "18": "ಹದಿನೆಂಟು", - "19": "ಹತ್ತೊಂಬತ್ತು", - "20": "ಇಪ್ಪತ್ತು", - "21": "ಇಪ್ಪತ್ತ್’ಒಂದು", - "22": "ಇಪ್ಪತ್ತ್’ಎರಡು", - "23": "ಇಪ್ಪತ್ತ್’ಮೂರು", - "24": "ಇಪ್ಪತ್ತ್’ನಾಲ್ಕು", - "25": "ಇಪ್ಪತ್ತ್’ಐದು", - "26": "ಇಪ್ಪತ್ತ್’ಆರು", - "27": "ಇಪ್ಪತ್ತ್’ಏಳು", - "28": "ಇಪ್ಪತ್ತ್’ಎಂಟು", - "29": "ಇಪ್ಪತ್ತ್’ಒಂಬತ್ತು", - "30": "ಮೂವತ್ತು", - "31": "ಮುವತ್ತ್’ಒಂದು", - "32": "ಮುವತ್ತ್’ಎರಡು", - "33": "ಮುವತ್ತ್’ಮೂರು", - "34": "ಮೂವತ್ತ್’ನಾಲ್ಕು", - "35": "ಮೂವತ್ತ್’ಐದು", - "36": "ಮೂವತ್ತ್’ಆರು", - "37": "ಮೂವತ್ತ್’ಏಳು", - "38": "ಮೂವತ್ತ್’ಎಂಟು", - "39": "ಮೂವತ್ತ್’ಒಂಬತ್ತು", - "40": "ನಲವತ್ತು", - "41": "ನಲವತ್ತೊಂದು", - "42": "ನಲವತ್ತ್ ಎರಡು", - "43": "ನಲವತ್ತ್ ಮೂರು", - "44": "ನಲವತ್ತ್ ನಾಲ್ಕು", - "45": "ನಲವತ್ತೈದು", - "46": "ನಲವತ್ತಾರು", - "47": "ನಲವತ್ತೇಳು", - "48": "ನಲವತ್ತೆಂಟು", - "49": "ನಲವತ್ತೊಂಬತ್ತು", - "50": "ಐವತ್ತು", - "51": "ಐವತ್ತೊಂದು", - "52": "ಐವತ್ತೆರಡು", - "53": "ಐವತ್ತಮೂರು", - "54": "ಐವತ್ತ್ನಾಲ್ಕು", - "55": "ಐವತ್ತೈದು", - "56": "ಐವತ್ತಾರು", - "57": "ಐವತ್ತೇಳು", - "58": "ಐವತ್ತೆಂಟು", - "59": "ಐವತ್ತೊಂಬತ್ತು", - "60": "ಅರವತ್ತು", - "61": "ಅರವತ್ತೊಂದು", - "62": "ಅರವತ್ತೆರಡು", - "63": "ಅರವತ್ತ್ ಮೂರು", - "64": "ಅರವತ್ತ್ ನಾಲ್ಕು", - "65": "ಅರವತ್ತೈದು", - "66": "ಅರವತ್ತಾರು", - "67": "ಅರವತ್ತೇಳು", - "68": "ಅರವತ್ತೆಂಟು", - "69": "ಅರವತ್ತೊಂಬತ್ತು", - "70": "ಎಪ್ಪತ್ತು", - "71": "ಎಪ್ಪತ್ತೊಂದು", - "72": "ಎಪ್ಪತ್ತೆರಡು", - "73": "ಎಪ್ಪತ್ತ್ ಮೂರು", - "74": "ಎಪ್ಪತ್ತ್ ನಾಲ್ಕು", - "75": "ಎಪ್ಪತ್ತೈದು", - "76": "ಎಪ್ಪತ್ತಾರು", - "77": "ಎಪ್ಪತ್ತೇಳು", - "78": "ಎಪ್ಪತ್ತೆಂಟು", - "79": "ಎಪ್ಪತ್ತೊಂಬತ್ತು", - "80": "ಎಂಬತ್ತು", - "81": "ಎಂಬತ್ತೊಂದು", - "82": "ಎಂಬತ್ತೆರಡು", - "83": "ಎಂಬತ್ತ್ ಮೂರು", - "84": "ಎಂಬತ್ತ್ ನಾಲ್ಕು", - "85": "ಎಂಬತ್ತೈದು", - "86": "ಎಂಬತ್ತಾರು", - "87": "ಎಂಬತ್ತೇಳು", - "88": "ಎಂಬತ್ತೆಂಟು", - "89": "ಎಂಬತ್ತೊಂಬತ್ತು", - "90": "ತೊಂಬತ್ತು", - "91": "ತೊಂಬತ್ತೊಂದು", - "92": "ತೊಂಬತ್ತೆರಡು", - "93": "ತೊಂಬತ್ತ ಮೂರು", - "94": "ತೊಂಬತ್ತ ನಾಲ್ಕು", - "95": "ತೊಂಬತ್ತೈದು", - "96": "ತೊಂಬತ್ತಾರು", - "97": "ತೊಂಬತ್ತೇಳು", - "98": "ತೊಂಬತ್ತೆಂಟು", - "99": "ತೊಂಬತ್ತೊಂಬತ್ತು", - "100": "ನೂರ", - "1000": "ಸಾವಿರದ", - "100000": "ಲಕ್ಷದ", - "10000000": "ಕೋಟಿ", - "1000000000": "ಶತಕೋಟಿ", -} # Kannada -num_dict["or"] = { - "0": "ଶୁନ୍ୟ", - "1": "ଏକ", - "2": "ଦୁଇ", - "3": "ତିନି", - "4": "ଚାରି", - "5": "ପାଞ୍ଚ", - "6": "ଛଅ", - "7": "ସାତ", - "8": "ଆଠ", - "9": "ନଅ", - "10": "ନଅ", - "11": "ଏଗାର", - "12": "ବାର", - "13": "ତେର", - "14": "ଚଉଦ", - "15": "ପନ୍ଦର", - "16": "ଷୋହଳ", - "17": "ସତର", - "18": "ଅଠର", - "19": "ଊଣାଇଶ", - "20": "କୋଡିଏ", - "21": "ଏକୋଇଶି", - "22": "ବାଇଶି", - "23": "ତେଇଶି", - "24": "ଚବିଶି", - "25": "ପଚିଶି", - "26": "ଛବିଶି", - "27": "ସତାଇଶି", - "28": "ଅଠାଇଶି", - "29": "ଅଣତିରିଶି", - "30": "ତିରିଶି", - "31": 
"ଏକତିରିଶି", - "32": "ବତିଶି", - "33": "ତେତିଶି", - "34": "ଚଉତିରିଶି", - "35": "ପଞ୍ଚତିରିଶି", - "36": "ଛତିଶି", - "37": "ସଂଇତିରିଶି", - "38": "ଅଠତିରିଶି", - "39": "ଅଣଚାଳିଶି", - "40": "ଚାଳିଶି", - "41": "ଏକଚାଳିଶି", - "42": "ବୟାଳିଶି", - "43": "ତେୟାଳିଶି", - "44": "ଚଉରାଳିଶି", - "45": "ପଞ୍ଚଚାଳିଶି", - "46": "ଛୟାଳିଶି", - "47": "ସତଚାଳିଶି", - "48": "ଅଠଚାଳିଶି", - "49": "ଅଣଚାଶ", - "50": "ପଚାଶ", - "51": "ଏକାବନ", - "52": "ବାଉନ", - "53": "ତେପନ", - "54": "ଚଉବନ", - "55": "ପଞ୍ଚାବନ", - "56": "ଛପନ", - "57": "ସତାବନ", - "58": "ଅଠାବନ", - "59": "ଅଣଷଠି", - "60": "ଷାଠିଏ", - "61": "ଏକଷଠି", - "62": "ବାଷଠି", - "63": "ତେଷଠି", - "64": "ଚଉଷଠି", - "65": "ପଞ୍ଚଷଠି", - "66": "ଛଅଷଠି", - "67": "ସତଷଠି", - "68": "ଅଠଷଠି", - "69": "ଅଣସ୍ତରୀ", - "70": "ସତୂରୀ", - "71": "ଏକସ୍ତରୀ", - "72": "ବାସ୍ତରୀ", - "73": "ତେସ୍ତରୀ", - "74": "ଚଉସ୍ତରୀ", - "75": "ପଞ୍ଚସ୍ତରୀ", - "76": "ଛଅସ୍ତରୀ", - "77": "ସତସ୍ତରୀ", - "78": "ଅଠସ୍ତରୀ", - "79": "ଅଣାଅଶୀ", - "80": "ଅଶୀ", - "81": "ଏକାଅଶୀ", - "82": "ବୟାଅଶୀ", - "83": "ତେୟାଅଶୀ", - "84": "ଚଉରାଅଶୀ", - "85": "ପଞ୍ଚାଅଶୀ", - "86": "ଛୟାଅଶୀ", - "87": "ସତାଅଶୀ", - "88": "ଅଠାଅଶୀ", - "89": "ଅଣାନବେ", - "90": "ନବେ", - "91": "ଏକାନବେ", - "92": "ବୟାନବେ", - "93": "ତେୟାନବେ", - "94": "ଚଉରାନବେ", - "95": "ପଞ୍ଚାନବେ", - "96": "ଛୟାନବେ", - "97": "ସତାନବେ", - "98": "ଅଠାନବେ", - "99": "ଅନେଶତ", - "100": "ଶହେ", - "1000": "ହଜାର", - "100000": "ଲକ୍ଷ", - "10000000": "କୋଟି", - "1000000000": "କୋଟି", -} # Oriya -num_dict["pa"] = { - "0": "ਸਿਫਰ ", - "1": "ਇੱਕ", - "2": "ਦੋ", - "3": "ਤਿੰਨ", - "4": "ਚਾਰ", - "5": "ਪੰਜ", - "6": "ਛੇ", - "7": "ਸੱਤ", - "8": "ਅੱਠ", - "9": "ਨੌਂ", - "10": "ਦੱਸ", - "11": "ਗਿਆਰਾਂ", - "12": "ਬਾਰਾਂ", - "13": "ਤੇਰਾਂ", - "14": "ਚੌਦਾਂ", - "15": "ਪੰਦਰਾਂ", - "16": "ਸੋਲ਼ਾਂ", - "17": "ਸਤਾਰਾਂ", - "18": "ਅਠਾਰਾਂ", - "19": "ਉਨੀ", - "20": "ਵੀਹ", - "21": "ਇੱਕੀ", - "22": "ਬਾਈ", - "23": "ਤੇਈ", - "24": "ਚੌਵੀ", - "25": "ਪੰਝੀ", - "26": "ਛੱਬੀ", - "27": "ਸਤਾਈ", - "28": "ਅਠਾਈ", - "29": "ਉਨੱਤੀ", - "30": "ਤੀਹ", - "31": "ਇਕੱਤੀ", - "32": "ਬੱਤੀ", - "33": "ਤੇਤੀ", - "34": "ਚੌਂਤੀ", - "35": "ਪੈਂਤੀ", - "36": "ਛੱਤੀ", - "37": "ਸੈਂਤੀ", - "38": "ਅਠੱਤੀ", - "39": "ਉਨਤਾਲੀ", - "40": "ਚਾਲੀ", - "41": "ਇਕਤਾਲੀ", - "42": "ਬਤਾਲੀ", - "43": "ਤਰਤਾਲੀ", - "44": "ਚੌਤਾਲੀ", - "45": "ਪੰਜਤਾਲੀ", - "46": "ਛਿਆਲੀ", - "47": "ਸੰਤਾਲੀ", - "48": "ਅੱਠਤਾਲੀ", - "49": "ਉਣਿੰਜਾ", - "50": "ਪੰਜਾਹ", - "51": "ਇਕਵਿੰਜਾ", - "52": "ਬਵਿੰਜਾ", - "53": "ਤਰਵਿੰਜਾ", - "54": "ਚਰਿੰਜਾ", - "55": "ਪਚਵਿੰਜਾ", - "56": "ਛਪਿੰਜਾ", - "57": "ਸਤਵਿੰਜਾ", - "58": "ਅੱਠਵਿੰਜਾ", - "59": "ਉਣਾਠ", - "60": "ਸੱਠ", - "61": "ਇਕਾਠ", - "62": "ਬਾਠ੍ਹ", - "63": "ਤਰੇਠ੍ਹ", - "64": "ਚੌਠ੍ਹ", - "65": "ਪੈਂਠ", - "66": "ਛਿਆਠ", - "67": "ਸਤਾਹਠ", - "68": "ਅੱਠਾਠ", - "69": "ਉਣੱਤਰ", - "70": "ਸੱਤਰ", - "71": "ਇਕ੍ਹੱਤਰ", - "72": "ਬਹੱਤਰ", - "73": "ਤਹੱਤਰ", - "74": "ਚੌਹੱਤਰ", - "75": "ਪੰਜੱਤਰ", - "76": "ਛਿਹੱਤਰ", - "77": "ਸਤੱਤਰ", - "78": "ਅਠੱਤਰ", - "79": "ਉਣਾਸੀ", - "80": "ਅੱਸੀ", - "81": "ਇਕਾਸੀ", - "82": "ਬਿਆਸੀ", - "83": "ਤਰਾਸੀ", - "84": "ਚਰਾਸੀ", - "85": "ਪੰਜਾਸੀ", - "86": "ਛਿਆਸੀ", - "87": "ਸਤਾਸੀ", - "88": "ਅਠਾਸੀ", - "89": "ਉਣਾਨਵੇਂ", - "90": "ਨੱਬੇ", - "91": "ਇਕਾਨਵੇਂ", - "92": "ਬਿਆਨਵੇਂ", - "93": "ਤਰਾਨਵੇਂ", - "94": "ਚਰਾਨਵੇਂ", - "95": "ਪਚਾਨਵੇਂ", - "96": "ਛਿਆਨਵੇਂ", - "97": "ਸਤਾਨਵੇਂ", - "98": "ਅਠਾਨਵੇਂ", - "99": "ਨਿੜਾਨਵੇਂ", - "100": "ਸੌ", - "1000": "ਹਜਾਰ", - "100000": "ਲੱਖ", - "10000000": "ਕਰੋੜ", - "1000000000": "ਅਰਬ", -} # Punjabi - -# --------------------------- num_to_word.py ------------------------------ -""" -Method to convert Numbers to Words -for indian languages - -Use cases:- -1) Speech recognition pre-processing -2) Language modeling Data pre-processing - -------------------------- -check indic_numbers.py to add support -for any indian language -""" - - -def 
language_specific_exception(words, lang, combiner): - """ - Language Specific Exception will come here - """ - - def occurs_at_end(piece): - return words[-len(piece) :] == piece - - if lang == "mr": - words = words.replace("एक" + combiner + "शे", "शंभर") - elif lang == "gu": - words = words.replace("બે" + combiner + "સો", "બસ્સો") - elif lang == "te": - exception_dict = { - "1": "ఒక", - "100": "వంద", - "100+": "వందలు", - "1000": "వెయ్యి", - "1000+": "వేలు", - "100000": "లక్ష", - "100000+": "లక్షలు", - "10000000": "కోటి", - "10000000+": "కోట్లు", - } - - test_case = ["100", "1000", "100000", "10000000"] - for test in test_case: - test_word = num_dict["te"][test] - match = num_dict["te"]["1"] + combiner + test_word - # for numbers like : 100, 1000, 100000 - if words == match: - return exception_dict[test] - # for numbers like : 200, 4000, 800000 - elif occurs_at_end(test_word): - words = words.replace(test_word, exception_dict[test + "+"]) - # for numbers like : 105, 1076, 123993 - elif not occurs_at_end(match): - replacement = exception_dict["1"] + combiner + exception_dict[test] - words = words.replace(match, replacement) - - # Exception case for 101...199 - special_case = "ఒక" + combiner + "వంద" - words = words.replace(special_case, "నూట") - elif lang == "kn": - # special case for 100 - if words == ("ಒಂದು" + combiner + "ನೂರ"): - return "ನೂರು" - exception_dict = { - "ನೂರ": "ನೂರು", - "ಸಾವಿರದ": "ಸಾವಿರ", - "ಲಕ್ಷದ": "ಲಕ್ಷ", - "ಕೋಟಿಯ": "ಕೋಟಿ", - } - for expt in exception_dict: - if occurs_at_end(expt): - words = words.replace(expt, exception_dict[expt]) - return words - - -def num_to_word(num, lang, separator=", ", combiner=" "): - """ - Main Method - :param num: Number digits from any indian language - :param lang: Language Code from supported Language - :param separator: Separator character i.e. separator = '-' --> 'two hundred-sixty' - :param combiner: combine number with position i.e. 
combiner = '-' --> 'two-hundred sixty' - :return: UTF-8 string of the number in words - """ - lang = lang.lower() - num = str(num) - - # Load dictionary according to language code - assert lang in supported_lang, "Language not supported" - num_dic = num_dict[lang] - - # Dash is the default combiner for English (India) - if (lang == "en") and (combiner == " "): - combiner = "-" - - # Remove punctuation from the number - num = str(num).replace(",", "").replace(" ", "") - - # Replace native-language digits with English digits - for language in supported_lang: - for num_index in range(10): - num = num.replace(all_num[language][num_index], all_num["en"][num_index]) - - # Assert that the input contains only digits - for digit in num: - assert digit in all_num["en"], "Input must contain only digits" - - # Process - # For numbers longer than 9 digits - def all_two_digit(digits_2): - if len(digits_2) <= 1: # Only one digit (or none) provided - return num_dic.get(digits_2, "") - elif digits_2 == "00": # Two zeros provided - return num_dic["0"] + separator + num_dic["0"] - elif digits_2[0] == "0": # First digit is zero - return num_dic["0"] + separator + num_dic[digits_2[1]] - else: # Both digits provided - return num_dic[digits_2] - - # For numbers less than 9 digits - def two_digit(digits_2): - digits_2 = digits_2.lstrip("0") - if len(digits_2) != 0: - return num_dic[digits_2] - else: - return "" - - def all_digit(digits): - digits = digits.lstrip("0") - digit_len = len(digits) - if digit_len > 3: - num_of_digits_to_process = (digit_len % 2) + 1 - process_digits = digits[:num_of_digits_to_process] - base = str(10 ** (int(digit_len / 2) * 2 - 1)) - remain_digits = digits[num_of_digits_to_process:] - return ( - num_dic[process_digits] - + combiner - + num_dic[base] - + separator - + all_digit(remain_digits) - ) - elif len(digits) == 3: - return ( - num_dic[digits[:1]] - + combiner - + num_dic["100"] - + separator - + two_digit(digits[1:]) - ) - else: - return two_digit(digits) - - num = num.lstrip("0") - full_digit_len = len(num) - - if full_digit_len == 0: - output = num_dic["0"] - elif full_digit_len <= 9: - output = all_digit(num) - else: - iteration = round(full_digit_len / 2) - output = all_two_digit(num[:2]) # First two digits - for i in range(1, iteration): - output = ( - output + separator + all_two_digit(num[i * 2 : (i + 1) * 2]) - ) # Next two-digit pairs - remaining_digits = num[iteration * 2 :] - if all_two_digit(remaining_digits) != "": - output = ( - output + separator + all_two_digit(remaining_digits) - ) # Remaining last one/two digits - - output = output.strip(separator) - - output = language_specific_exception(output, lang, combiner) - - return output - - -# --------------------------------- num_to_word_on_a_sent --------------------------------- - - -def is_digit(word, digit_pattern): - return re.search(digit_pattern, word) - - -def remove_punct(sent): - clean = re.sub("[%s]" % re.escape(string.punctuation), " ", sent) - return " ".join([word for word in clean.split() if word]) - - -def normalize_nums(text, lang): - """ - text: str (eg) - lang: lang code ['en', 'hi'] - - returns: str - (eg) - """ - - if lang in supported_lang: - words = text.split() - lang_digits = [str(i) for i in range(0, 10)] - - digit_pattern = "[" + "".join(lang_digits) + "]" - num_indices = [ - ind for ind, word in enumerate(words) if is_digit(word, digit_pattern) - ] - - words_up = [ - num_to_word(word, lang, separator=" ", combiner=" ") - if ind in num_indices - else word - for ind, word in enumerate(words) - ] - return " 
".join(words_up) - else: - return text - - -if __name__ == "__main__": - print(normalize_nums("रीटा के पास 16 बिल्लियाँ हैं।", "hi")) diff --git a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/tests/test_bpe.py b/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/tests/test_bpe.py deleted file mode 100644 index d8c84857b4aa22903907ae3d217bf9468b168c88..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/tests/test_bpe.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals -import unittest -import codecs - -import os,sys,inspect -currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) -parentdir = os.path.dirname(currentdir) -sys.path.insert(0,parentdir) - -from learn_bpe import learn_bpe -from apply_bpe import BPE - - -class TestBPELearnMethod(unittest.TestCase): - - def test_learn_bpe(self): - infile = codecs.open(os.path.join(currentdir,'data','corpus.en'), encoding='utf-8') - outfile = codecs.open(os.path.join(currentdir,'data','bpe.out'), 'w', encoding='utf-8') - learn_bpe(infile, outfile, 1000) - infile.close() - outfile.close() - - outlines = open(os.path.join(currentdir,'data','bpe.out')) - reflines = open(os.path.join(currentdir,'data','bpe.ref')) - - for line, line2 in zip(outlines, reflines): - self.assertEqual(line, line2) - - outlines.close() - reflines.close() - -class TestBPESegmentMethod(unittest.TestCase): - - def setUp(self): - - with codecs.open(os.path.join(currentdir,'data','bpe.ref'), encoding='utf-8') as bpefile: - self.bpe = BPE(bpefile) - - self.infile = codecs.open(os.path.join(currentdir,'data','corpus.en'), encoding='utf-8') - self.reffile = codecs.open(os.path.join(currentdir,'data','corpus.bpe.ref.en'), encoding='utf-8') - - def tearDown(self): - - self.infile.close() - self.reffile.close() - - def test_apply_bpe(self): - - for line, ref in zip(self.infile, self.reffile): - out = self.bpe.process_line(line) - self.assertEqual(out, ref) - - def test_trailing_whitespace(self): - """BPE.proces_line() preserves leading and trailing whitespace""" - - orig = ' iron cement \n' - exp = ' ir@@ on c@@ ement \n' - - out = self.bpe.process_line(orig) - self.assertEqual(out, exp) - - def test_utf8_whitespace(self): - """UTF-8 whitespace is treated as normal character, not word boundary""" - - orig = 'iron\xa0cement\n' - exp = 'ir@@ on@@ \xa0@@ c@@ ement\n' - - out = self.bpe.process_line(orig) - self.assertEqual(out, exp) - - def test_empty_line(self): - - orig = '\n' - exp = '\n' - - out = self.bpe.process_line(orig) - self.assertEqual(out, exp) - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.803c5e11.css b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.803c5e11.css deleted file mode 100644 index bca8fc3e5366c4dbbeef359caafacb668d901e99..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.803c5e11.css +++ /dev/null @@ -1 +0,0 @@ 
-div.svelte-10ogue4>*:not(.absolute){border-radius:0!important}div.svelte-10ogue4>*:first-child{border-top-left-radius:.5rem!important;border-top-right-radius:.5rem!important}div.svelte-10ogue4>*:last-child{border-bottom-left-radius:.5rem!important;border-bottom-right-radius:.5rem!important}div.svelte-10ogue4>*+*:not(.absolute){border-top-width:0px!important} diff --git a/spaces/Hoodady/3DFuse/cldm/hack.py b/spaces/Hoodady/3DFuse/cldm/hack.py deleted file mode 100644 index 454361e9d036cd1a6a79122c2fd16b489e4767b1..0000000000000000000000000000000000000000 --- a/spaces/Hoodady/3DFuse/cldm/hack.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch -import einops - -import ldm.modules.encoders.modules -import ldm.modules.attention - -from transformers import logging -from ldm.modules.attention import default - - -def disable_verbosity(): - logging.set_verbosity_error() - print('logging improved.') - return - - -def enable_sliced_attention(): - ldm.modules.attention.CrossAttention.forward = _hacked_sliced_attentin_forward - print('Enabled sliced_attention.') - return - - -def hack_everything(clip_skip=0): - disable_verbosity() - ldm.modules.encoders.modules.FrozenCLIPEmbedder.forward = _hacked_clip_forward - ldm.modules.encoders.modules.FrozenCLIPEmbedder.clip_skip = clip_skip - print('Enabled clip hacks.') - return - - -# Written by Lvmin -def _hacked_clip_forward(self, text): - PAD = self.tokenizer.pad_token_id - EOS = self.tokenizer.eos_token_id - BOS = self.tokenizer.bos_token_id - - def tokenize(t): - return self.tokenizer(t, truncation=False, add_special_tokens=False)["input_ids"] - - def transformer_encode(t): - if self.clip_skip > 1: - rt = self.transformer(input_ids=t, output_hidden_states=True) - return self.transformer.text_model.final_layer_norm(rt.hidden_states[-self.clip_skip]) - else: - return self.transformer(input_ids=t, output_hidden_states=False).last_hidden_state - - def split(x): - return x[75 * 0: 75 * 1], x[75 * 1: 75 * 2], x[75 * 2: 75 * 3] - - def pad(x, p, i): - return x[:i] if len(x) >= i else x + [p] * (i - len(x)) - - raw_tokens_list = tokenize(text) - tokens_list = [] - - for raw_tokens in raw_tokens_list: - raw_tokens_123 = split(raw_tokens) - raw_tokens_123 = [[BOS] + raw_tokens_i + [EOS] for raw_tokens_i in raw_tokens_123] - raw_tokens_123 = [pad(raw_tokens_i, PAD, 77) for raw_tokens_i in raw_tokens_123] - tokens_list.append(raw_tokens_123) - - tokens_list = torch.IntTensor(tokens_list).to(self.device) - - feed = einops.rearrange(tokens_list, 'b f i -> (b f) i') - y = transformer_encode(feed) - z = einops.rearrange(y, '(b f) i c -> b (f i) c', f=3) - - return z - - -# Stolen from https://github.com/basujindal/stable-diffusion/blob/main/optimizedSD/splitAttention.py -def _hacked_sliced_attentin_forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - del context, x - - q, k, v = map(lambda t: einops.rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - limit = k.shape[0] - att_step = 1 - q_chunks = list(torch.tensor_split(q, limit // att_step, dim=0)) - k_chunks = list(torch.tensor_split(k, limit // att_step, dim=0)) - v_chunks = list(torch.tensor_split(v, limit // att_step, dim=0)) - - q_chunks.reverse() - k_chunks.reverse() - v_chunks.reverse() - sim = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) - del k, q, v - for i in range(0, limit, att_step): - q_buffer = q_chunks.pop() - k_buffer = k_chunks.pop() - v_buffer = v_chunks.pop() - 
sim_buffer = torch.einsum('b i d, b j d -> b i j', q_buffer, k_buffer) * self.scale - - del k_buffer, q_buffer - # attention, what we cannot get enough of, by chunks - - sim_buffer = sim_buffer.softmax(dim=-1) - - sim_buffer = torch.einsum('b i j, b j d -> b i d', sim_buffer, v_buffer) - del v_buffer - sim[i:i + att_step, :, :] = sim_buffer - - del sim_buffer - sim = einops.rearrange(sim, '(b h) n d -> b n (h d)', h=h) - return self.to_out(sim) diff --git a/spaces/Hoodady/3DFuse/voxnerf/utils.py b/spaces/Hoodady/3DFuse/voxnerf/utils.py deleted file mode 100644 index 94c261098d65432c1b8ee7e3314918ebfbd06daf..0000000000000000000000000000000000000000 --- a/spaces/Hoodady/3DFuse/voxnerf/utils.py +++ /dev/null @@ -1,67 +0,0 @@ -import numpy as np -import math - - -def blend_rgba(img): - img = img[..., :3] * img[..., -1:] + (1. - img[..., -1:]) # blend A to RGB - return img - - -class PSNR(): - @classmethod - def psnr(cls, ref, pred, max=1.0): - # if inputs of type int, then make sure max is 255 - mse = ((ref - pred) ** 2).mean() - return cls.psnr_from_mse(mse, max) - - @staticmethod - def psnr_from_mse(mse, max=1.0): - psnr = 20 * math.log10(max) - 10 * math.log10(mse) - return psnr - - @staticmethod - def psnr_to_rms(psnr_diff): - """rms error improvement _ratio_ from psnr _diff_""" - ratio = 10 ** (-psnr_diff / 20) - return ratio - - -class Scrambler(): - def __init__(self, N): - self.perm = np.random.permutation(N) - - def apply(self, *items): - return [elem[self.perm] for elem in items] - - def unscramble(self, *items): - ret = [] - for elem in items: - clean = np.zeros_like(elem) - clean[self.perm] = elem - ret.append(clean) - return ret - - -def trailing_window_view(xs, window_size): - assert (window_size % 2) == 1, "window size should be odd" - view = np.lib.stride_tricks.sliding_window_view( - np.pad(xs, (window_size - 1, 0), mode="edge"), window_size - ) - return view - - -def to_step(pbar, percent): - step = int(pbar.total * percent / 100) - return step - - -def every(pbar, *, percent=None, step=None): - if step is None: - step = to_step(pbar, percent) - return (pbar.n + 1) % step == 0 - - -def at(pbar, *, percent=None, step=None): - if step is None: - step = to_step(pbar, percent) - return (pbar.n + 1) == step diff --git a/spaces/HuangLab/CELL-E_2-Sequence_Prediction/taming/modules/discriminator/model.py b/spaces/HuangLab/CELL-E_2-Sequence_Prediction/taming/modules/discriminator/model.py deleted file mode 100644 index 2aaa3110d0a7bcd05de7eca1e45101589ca5af05..0000000000000000000000000000000000000000 --- a/spaces/HuangLab/CELL-E_2-Sequence_Prediction/taming/modules/discriminator/model.py +++ /dev/null @@ -1,67 +0,0 @@ -import functools -import torch.nn as nn - - -from taming.modules.util import ActNorm - - -def weights_init(m): - classname = m.__class__.__name__ - if classname.find('Conv') != -1: - nn.init.normal_(m.weight.data, 0.0, 0.02) - elif classname.find('BatchNorm') != -1: - nn.init.normal_(m.weight.data, 1.0, 0.02) - nn.init.constant_(m.bias.data, 0) - - -class NLayerDiscriminator(nn.Module): - """Defines a PatchGAN discriminator as in Pix2Pix - --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py - """ - def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False): - """Construct a PatchGAN discriminator - Parameters: - input_nc (int) -- the number of channels in input images - ndf (int) -- the number of filters in the last conv layer - n_layers (int) -- the number of conv layers in the discriminator - norm_layer -- 
normalization layer - """ - super(NLayerDiscriminator, self).__init__() - if not use_actnorm: - norm_layer = nn.BatchNorm2d - else: - norm_layer = ActNorm - if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters - use_bias = norm_layer.func != nn.BatchNorm2d - else: - use_bias = norm_layer != nn.BatchNorm2d - - kw = 4 - padw = 1 - sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)] - nf_mult = 1 - nf_mult_prev = 1 - for n in range(1, n_layers): # gradually increase the number of filters - nf_mult_prev = nf_mult - nf_mult = min(2 ** n, 8) - sequence += [ - nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), - norm_layer(ndf * nf_mult), - nn.LeakyReLU(0.2, True) - ] - - nf_mult_prev = nf_mult - nf_mult = min(2 ** n_layers, 8) - sequence += [ - nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), - norm_layer(ndf * nf_mult), - nn.LeakyReLU(0.2, True) - ] - - sequence += [ - nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map - self.main = nn.Sequential(*sequence) - - def forward(self, input): - """Standard forward.""" - return self.main(input) diff --git a/spaces/HugoHE/monitoringObjectDetection/runtime_monitors/Monitor.py b/spaces/HugoHE/monitoringObjectDetection/runtime_monitors/Monitor.py deleted file mode 100644 index 41156377cc1cb568429f89c2fa89b450d229f37e..0000000000000000000000000000000000000000 --- a/spaces/HugoHE/monitoringObjectDetection/runtime_monitors/Monitor.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -from abstractions import * -import pickle -import numpy as np - -class Monitor(object): - - def __init__(self, good_ref=None): - # self.abs_type = abs_type - self.good_ref = good_ref - - - def set_reference(self, good_ref): - self.good_ref = good_ref - - # def get_identity(self): - # print("Monitor for network:" + self.netName + "class: " + str(self.classification) + "at layer " + str(self.location)) - - - def make_verdicts(self, features): - if len(self.good_ref): - verdicts = ref_query(features, self.good_ref) - else: - raise RuntimeError("No reference exists!") - return verdicts - -def ref_query(features, reference): - query_results = [boxes_query(x, reference) for x in features] - return query_results - - -# def query_infusion(in_good_ref, in_bad_ref): -# if len(in_good_ref) == len(in_bad_ref): #0: acceptance (true, false), 1: rejection (false, true or false), 2: uncertainty (true, true) -# verdicts = np.zeros(len(in_good_ref), dtype=int) -# for i in range(len(in_good_ref)): -# if not in_good_ref[i]: -# verdicts[i] = 1 -# elif in_bad_ref[i]: -# verdicts[i] = 2 -# return verdicts -# else: -# print("Error: IllegalArgument") diff --git a/spaces/HuguesdeF/moulinette/Corriger.py b/spaces/HuguesdeF/moulinette/Corriger.py deleted file mode 100644 index 2ff010c10f2898e939191d5702bd86f644ee530b..0000000000000000000000000000000000000000 --- a/spaces/HuguesdeF/moulinette/Corriger.py +++ /dev/null @@ -1,162 +0,0 @@ -import streamlit as st -import os -import streamlit_authenticator as stauth -from code.functions import pipeline_svg -from PIL import Image -import cv2 -import numpy as np -from io import BytesIO -import copy - -logo = Image.open("seguinmoreau.png") -st.set_page_config( - page_title="Moulinette Logos", - page_icon=logo, - layout="wide", - initial_sidebar_state="expanded" -) - -inch_value = 2.54 
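-# (Illustrative note, not from the original app:) `inch_value` drives the cm-to-pixel conversion used further down: px = int(cm / inch_value * dpi). -# For example, a 20 cm side at 200 dpi becomes int(20 / 2.54 * 200) = 1574 pixels.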
- -logo = Image.open('seguinmoreau.png') -st.image(logo, width=200) -st.markdown( - """ - # Boîte à Outils de correction de logos :wrench: - - Bienvenue dans la boîte à outils de correction de logos de Seguin Moreau. - - ### :hammer: Les outils - Dans cette boîte à outils, vous trouverez: - * Un outil de Correction automatique de logo (enlever les petits défauts, lissage, vectorisation, grossissement des traits trop fins). - - ### :bulb: Mode d'emploi - * Cliquer sur 'Browse files' - * Sélectionner un logo - * La correction est automatique. Si la correction ne vous convient pas, il est possible de régler les paramètres en cliquant sur 'Paramétrage' à droite de l'image. - * Les deux paramètres permettent de corriger les défauts liés à la présence de gris sur le logo ou la 'pixélisation' du logo trop importante. - - """ -) - -uploaded_files = st.file_uploader("Choisir un logo", accept_multiple_files=True) - -image_width = 500 -size_value = st.slider("Largeur de trait minimum", min_value=1, max_value=21, value=7, step=2) - -size_value = (size_value - 1) // 2 - -# kernel_type_str = st.selectbox("Kernel type", ["Ellipse", "Rectangle", "Cross"]) -kernel_type_str = "Ellipse" -dict_kernel_type = {"Ellipse": cv2.MORPH_ELLIPSE, "Rectangle": cv2.MORPH_RECT, "Cross": cv2.MORPH_CROSS} -kernel_type = dict_kernel_type[kernel_type_str] - -for uploaded_file in uploaded_files: - col1, col2, col3 = st.columns([1, 1, 1]) - col3.markdown("---") - - image = Image.open(uploaded_file).convert('L') - image_input = np.array(image) - image = copy.deepcopy(image_input) - col1.image(image_input / 255.0, caption="Image d'entrée", use_column_width='auto') - - with col3: - with st.expander(":gear: Paramétrage"): - st.write("Si l'image contient du gris, faire varier le seuil ci-dessous:") - threshold = st.slider("Seuil pour convertir l'image en noir&blanc.", min_value=0, max_value=255, - value=0, - step=1, key=f"{uploaded_file}_slider_threshold") - st.write("Si l'image est pixelisée, ou contient trop de détails, " - "augmenter la valeur ci-dessous:") - blur_value = st.slider("Seuil pour lisser l'image", min_value=1, max_value=11, value=1, step=2, - key=f"{uploaded_file}_slider_gaussian_sigma") - st.write("Si l'image contient des traits très fin (de l'odre du pixel)," - " augmenter le seuil ci-dessous, de 1 par 1:") - dilate_lines_value = st.slider("Dilatation de l'image d'origine: (en pixels)", min_value=0, max_value=5, - value=0, step=1, key=f"{uploaded_file}_slider_dilation_image") - - st.write("Taille d'exportation d'image:") - - dpi_value = st.number_input("Valeur dpi:", key=f"{uploaded_file}_number_dpi_value", value=200) - - st.write("---") - st.write("Spécifier la taille maximum d'un côté, en cm:") - side_width_value = st.number_input("Taille max de côté cible (cm):", - key=f"{uploaded_file}_number_target_value", value=20.0) - new_largest_side_value = int(side_width_value / inch_value * dpi_value) - - h, w, *_ = image.shape - - # Resize image - ratio = w / h - if ratio > 1: - width = new_largest_side_value - height = int(width / ratio) - else: - height = new_largest_side_value - width = int(ratio * height) - - st.write("---") - st.write("Ou, spécifier la largeur OU la hauteur cible, en cm:") - - target_width_value = st.number_input("Largeur cible (cm):", key=f"{uploaded_file}_number_width_value", - value=0.0) - target_height_value = st.number_input("Hauteur cible (cm):", key=f"{uploaded_file}_number_height_value", - value=0.0) - - if target_width_value > 0 and target_height_value == 0: - width = 
int(target_width_value / inch_value * dpi_value) - height = int(width / ratio) - elif target_height_value > 0 and target_width_value == 0: - height = int(target_height_value / inch_value * dpi_value) - width = int(height * ratio) - elif target_height_value > 0 and target_width_value > 0: - st.warning("Vous ne pouvez pas modifier la largeur et la hauteur simultanément.") - - st.info(f"Le logo sera redimensionné de :") - st.info(f"hauteur={h} pixels et largeur={w} pixels vers " - f"hauteur={height} pixels et largeur={width} pixels.") - - if threshold > 0: - image = (image > threshold) * 255 - image = image.astype('uint8') - - if blur_value > 0: - image = cv2.GaussianBlur(image, (blur_value, blur_value), blur_value - 1) - - # Process image cv32f ==> cv32f - img_final = pipeline_svg(image, size_value=size_value, level=1, threshold=threshold, kernel_type=kernel_type, - dilate_lines_value=dilate_lines_value) - - col2.image(img_final, caption="Image corrigée", use_column_width='auto') - - # Check for grayscale - tolerance = 10 - ratio_of_gray_pixels = int(np.sum((tolerance < image) * (image < 255 - tolerance)) / np.size(image) * 100) - if ratio_of_gray_pixels > 1: - col3.warning(f":warning: Le nombre de pixels gris est élevé: {ratio_of_gray_pixels} % > 1%") - - # Check reconstruction fidelity - distance = np.mean((np.array(image) - img_final) ** 2) - if distance > 10: - col3.warning( - f":warning: Le logo est peut-être trop dégradé (MSE={distance:.2f} > 10).\nVérifier visuellement.") - - dim = (width, height) - # resize image - resized_img_final = cv2.resize(img_final, dim, interpolation=cv2.INTER_AREA) - resized_image_input = cv2.resize(image_input, dim, interpolation=cv2.INTER_AREA) - - buf = BytesIO() - # img_stacked = np.hstack((resized_image_input, resized_img_final)) - img_final = Image.fromarray(resized_img_final).convert("L") - - img_final.save(buf, format="PNG", dpi=(dpi_value, dpi_value)) - byte_im = buf.getvalue() - - btn = col3.download_button( - label=":inbox_tray: Télécharger l'image", - data=byte_im, - file_name=f"corrected_{uploaded_file.name}", - mime="image/png" - ) diff --git a/spaces/HutzHoo/dreamlike-photoreal-2.0/app.py b/spaces/HutzHoo/dreamlike-photoreal-2.0/app.py deleted file mode 100644 index 0d0ae718eacc495efdbb94276323b93eb3321f76..0000000000000000000000000000000000000000 --- a/spaces/HutzHoo/dreamlike-photoreal-2.0/app.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import gradio as gr - -API_KEY=os.environ.get('HUGGING_FACE_HUB_TOKEN', None) - -article = """--- -This space was created using [SD Space Creator](https://huggingface.co/spaces/anzorq/sd-space-creator).""" - -gr.Interface.load( - name="models/dreamlike-art/dreamlike-photoreal-2.0", - title="""Dreamlike Photoreal 2.0""", - description="""Demo for Dreamlike Photoreal 2.0 Stable Diffusion model.""", - article=article, - api_key=API_KEY, - ).queue(concurrency_count=20).launch() diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/encoders/nltk_tokenizer.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/encoders/nltk_tokenizer.py deleted file mode 100644 index 0ab92377b3a23bb48384c3f7acf299612e8b0775..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/data/encoders/nltk_tokenizer.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
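-# (Illustrative usage sketch, ours, not part of fairseq:) the tokenizer below simply wraps nltk.word_tokenize. -# Assuming `nltk` is installed and its 'punkt' data downloaded (import nltk; nltk.download('punkt')), encoding works like: -#     from nltk.tokenize import word_tokenize -#     " ".join(word_tokenize("Don't panic."))   # -> "Do n't panic ." -# decode() below is the identity, since the original spacing is not recoverable after joining tokens with spaces.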
- -from fairseq.data.encoders import register_tokenizer -from fairseq.dataclass import FairseqDataclass - - -@register_tokenizer("nltk", dataclass=FairseqDataclass) -class NLTKTokenizer(object): - def __init__(self, *unused): - try: - from nltk.tokenize import word_tokenize - - self.word_tokenize = word_tokenize - except ImportError: - raise ImportError("Please install nltk with: pip install nltk") - - def encode(self, x: str) -> str: - return " ".join(self.word_tokenize(x)) - - def decode(self, x: str) -> str: - return x diff --git a/spaces/ICML2022/resefa/utils/visualizers/video_visualizer.py b/spaces/ICML2022/resefa/utils/visualizers/video_visualizer.py deleted file mode 100644 index b4c3a5934224edc5d557ad1b1458d4f776148a75..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/resefa/utils/visualizers/video_visualizer.py +++ /dev/null @@ -1,173 +0,0 @@ -# python3.7 -"""Contains the visualizer to visualize images as a video. - -This file relies on `FFmpeg`. Use `sudo apt-get install ffmpeg` and -`brew install ffmpeg` to install on Ubuntu and MacOS respectively. -""" - -import os.path -from skvideo.io import FFmpegWriter -from skvideo.io import FFmpegReader - -from ..image_utils import parse_image_size -from ..image_utils import load_image -from ..image_utils import resize_image -from ..image_utils import list_images_from_dir - -__all__ = ['VideoVisualizer', 'VideoReader'] - - -class VideoVisualizer(object): - """Defines the video visualizer that presents images as a video.""" - - def __init__(self, - path=None, - frame_size=None, - fps=25.0, - codec='libx264', - pix_fmt='yuv420p', - crf=1): - """Initializes the video visualizer. - - Args: - path: Path to write the video. (default: None) - frame_size: Frame size, i.e., (height, width). (default: None) - fps: Frames per second. (default: 25) - codec: Codec. (default: `libx264`) - pix_fmt: Pixel format. (default: `yuv420p`) - crf: Constant rate factor, which controls the compression. The - larger this field is, the higher the compression and the lower - the quality. `0` means no compression and consequently the - highest quality. To enable QuickTime playing (requires YUV to - be 4:2:0, but `crf = 0` results in YUV 4:4:4), please set this - field to at least 1. (default: 1) - """ - self.set_path(path) - self.set_frame_size(frame_size) - self.set_fps(fps) - self.set_codec(codec) - self.set_pix_fmt(pix_fmt) - self.set_crf(crf) - self.video = None - - def set_path(self, path=None): - """Sets the path to save the video.""" - self.path = path - - def set_frame_size(self, frame_size=None): - """Sets the video frame size.""" - height, width = parse_image_size(frame_size) - self.frame_height = height - self.frame_width = width - - def set_fps(self, fps=25.0): - """Sets the FPS (frames per second) of the video.""" - self.fps = fps - - def set_codec(self, codec='libx264'): - """Sets the video codec.""" - self.codec = codec - - def set_pix_fmt(self, pix_fmt='yuv420p'): - """Sets the video pixel format.""" - self.pix_fmt = pix_fmt - - def set_crf(self, crf=1): - """Sets the CRF (constant rate factor) of the video.""" - self.crf = crf - - def init_video(self): - """Initializes an empty video with expected settings.""" - assert not os.path.exists(self.path), f'Video `{self.path}` already exists!'
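-        # (Illustrative note, not from the original file:) the outputdict entries below map one-to-one onto ffmpeg CLI flags: -        # -r (frame rate), -s (frame size), -vcodec (encoder), -crf (size/quality trade-off) and -pix_fmt (pixel format).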
- assert self.frame_height > 0 - assert self.frame_width > 0 - - video_setting = { - '-r': f'{self.fps:.2f}', - '-s': f'{self.frame_width}x{self.frame_height}', - '-vcodec': f'{self.codec}', - '-crf': f'{self.crf}', - '-pix_fmt': f'{self.pix_fmt}', - } - self.video = FFmpegWriter(self.path, outputdict=video_setting) - - def add(self, frame): - """Adds a frame into the video visualizer. - - NOTE: The input frame is assumed to be with `RGB` channel order. - """ - if self.video is None: - height, width = frame.shape[0:2] - height = self.frame_height or height - width = self.frame_width or width - self.set_frame_size((height, width)) - self.init_video() - if frame.shape[0:2] != (self.frame_height, self.frame_width): - frame = resize_image(frame, (self.frame_width, self.frame_height)) - self.video.writeFrame(frame) - - def visualize_collection(self, images, save_path=None): - """Visualizes a collection of images one by one.""" - if save_path is not None and save_path != self.path: - self.save() - self.set_path(save_path) - for image in images: - self.add(image) - self.save() - - def visualize_list(self, image_list, save_path=None): - """Visualizes a list of image files.""" - if save_path is not None and save_path != self.path: - self.save() - self.set_path(save_path) - for filename in image_list: - image = load_image(filename) - self.add(image) - self.save() - - def visualize_directory(self, directory, save_path=None): - """Visualizes all images under a directory.""" - image_list = list_images_from_dir(directory) - self.visualize_list(image_list, save_path) - - def save(self): - """Saves the video by closing the file.""" - if self.video is not None: - self.video.close() - self.video = None - self.set_path(None) - - -class VideoReader(object): - """Defines the video reader. - - This class can be used to read frames from a given video. - - NOTE: Each frame can be read only once. - TODO: Fix this? - """ - - def __init__(self, path, inputdict=None): - """Initializes the video reader by loading the video from disk.""" - self.path = path - self.video = FFmpegReader(path, inputdict=inputdict) - - self.length = self.video.inputframenum - self.frame_height = self.video.inputheight - self.frame_width = self.video.inputwidth - self.fps = self.video.inputfps - self.pix_fmt = self.video.pix_fmt - - def __del__(self): - """Releases the opened video.""" - self.video.close() - - def read(self, image_size=None): - """Reads the next frame.""" - frame = next(self.video.nextFrame()) - height, width = parse_image_size(image_size) - height = height or frame.shape[0] - width = width or frame.shape[1] - if frame.shape[0:2] != (height, width): - frame = resize_image(frame, (width, height)) - return frame diff --git a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py b/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py deleted file mode 100644 index 2af819d61d589cfec2e0ca46612a7456f42b831a..0000000000000000000000000000000000000000 --- a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. 
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Conditional DETR -# Copyright (c) 2021 Microsoft. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Copied from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# ------------------------------------------------------------------------ - -from .groundingdino import build_groundingdino diff --git a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/registry.py b/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/registry.py deleted file mode 100644 index 2d22a59eec79a2a19b83fa1779f2adaf5753aec6..0000000000000000000000000000000000000000 --- a/spaces/IDEA-Research/Grounded-SAM/GroundingDINO/groundingdino/models/registry.py +++ /dev/null @@ -1,66 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# -*- coding: utf-8 -*- -# @Author: Yihao Chen -# @Date: 2021-08-16 16:03:17 -# @Last Modified by: Shilong Liu -# @Last Modified time: 2022-01-23 15:26 -# modified from mmcv - -import inspect -from functools import partial - - -class Registry(object): - def __init__(self, name): - self._name = name - self._module_dict = dict() - - def __repr__(self): - format_str = self.__class__.__name__ + "(name={}, items={})".format( - self._name, list(self._module_dict.keys()) - ) - return format_str - - def __len__(self): - return len(self._module_dict) - - @property - def name(self): - return self._name - - @property - def module_dict(self): - return self._module_dict - - def get(self, key): - return self._module_dict.get(key, None) - - def registe_with_name(self, module_name=None, force=False): - return partial(self.register, module_name=module_name, force=force) - - def register(self, module_build_function, module_name=None, force=False): - """Register a module build function. - Args: - module_build_function (callable): Module build function to be registered. 
- """ - if not inspect.isfunction(module_build_function): - raise TypeError( - "module_build_function must be a function, but got {}".format( - type(module_build_function) - ) - ) - if module_name is None: - module_name = module_build_function.__name__ - if not force and module_name in self._module_dict: - raise KeyError("{} is already registered in {}".format(module_name, self.name)) - self._module_dict[module_name] = module_build_function - - return module_build_function - - -MODULE_BUILD_FUNCS = Registry("model build functions") diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/onnx/onnx_export.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/onnx/onnx_export.py deleted file mode 100644 index 976bfe97a213d1390bdc044b5d86cab84d10e63b..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros2/onnx/onnx_export.py +++ /dev/null @@ -1,73 +0,0 @@ -import argparse -import time -import numpy as np -import onnx -from onnxsim import simplify -import onnxruntime as ort -import onnxoptimizer -import torch -from model_onnx import SynthesizerTrn -import utils -from hubert import hubert_model_onnx - -def main(HubertExport,NetExport): - - path = "NyaruTaffy" - - if(HubertExport): - device = torch.device("cuda") - hubert_soft = utils.get_hubert_model() - test_input = torch.rand(1, 1, 16000) - input_names = ["source"] - output_names = ["embed"] - torch.onnx.export(hubert_soft.to(device), - test_input.to(device), - "hubert3.0.onnx", - dynamic_axes={ - "source": { - 2: "sample_length" - } - }, - verbose=False, - opset_version=13, - input_names=input_names, - output_names=output_names) - if(NetExport): - device = torch.device("cuda") - hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - SVCVITS = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", SVCVITS, None) - _ = SVCVITS.eval().to(device) - for i in SVCVITS.parameters(): - i.requires_grad = False - test_hidden_unit = torch.rand(1, 50, 256) - test_lengths = torch.LongTensor([50]) - test_pitch = torch.rand(1, 50) - test_sid = torch.LongTensor([0]) - input_names = ["hidden_unit", "lengths", "pitch", "sid"] - output_names = ["audio", ] - SVCVITS.eval() - torch.onnx.export(SVCVITS, - ( - test_hidden_unit.to(device), - test_lengths.to(device), - test_pitch.to(device), - test_sid.to(device) - ), - f"checkpoints/{path}/model.onnx", - dynamic_axes={ - "hidden_unit": [0, 1], - "pitch": [1] - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names) - - -if __name__ == '__main__': - main(False,True) diff --git a/spaces/Illumotion/Koboldcpp/.github/ISSUE_TEMPLATE/custom.md b/spaces/Illumotion/Koboldcpp/.github/ISSUE_TEMPLATE/custom.md deleted file mode 100644 index 8fd95535677803e6349e9ce769cb51f5814e1458..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/.github/ISSUE_TEMPLATE/custom.md +++ /dev/null @@ -1,185 +0,0 @@ ---- -name: Issue and enhancement template -about: Used to report issues and request enhancements for llama.cpp -title: "[User] Insert summary of your issue or enhancement.." -labels: '' -assignees: '' - ---- - -# Prerequisites - -Please answer the following questions for yourself before submitting an issue. - -- [ ] I am running the latest code. Development is very rapid so there are no tagged versions as of now. 
-- [ ] I carefully followed the [README.md](https://github.com/ggerganov/llama.cpp/blob/master/README.md). -- [ ] I [searched using keywords relevant to my issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/filtering-and-searching-issues-and-pull-requests) to make sure that I am creating a new issue that is not already open (or closed). -- [ ] I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new bug or useful enhancement to share. - -# Expected Behavior - -Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do. - -# Current Behavior - -Please provide a detailed written description of what `llama.cpp` did, instead. - -# Environment and Context - -Please provide detailed information about your computer setup. This is important in case the issue is not reproducible except for under certain specific conditions. - -* Physical (or virtual) hardware you are using, e.g. for Linux: - -`$ lscpu` - -* Operating System, e.g. for Linux: - -`$ uname -a` - -* SDK version, e.g. for Linux: - -``` -$ python3 --version -$ make --version -$ g++ --version -``` - -# Failure Information (for bugs) - -Please help provide information about the failure if this is a bug. If it is not a bug, please remove the rest of this template. - -# Steps to Reproduce - -Please provide detailed steps for reproducing the issue. We are not sitting in front of your screen, so the more detail the better. - -1. step 1 -2. step 2 -3. step 3 -4. etc. - -# Failure Logs - -Please include any relevant log snippets or files. If it works under one configuration but not under another, please provide logs for both configurations and their corresponding outputs so it is easy to see where behavior changes. - -Also, please try to **avoid using screenshots** if at all possible. Instead, copy/paste the console output and use [Github's markdown](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) to cleanly format your logs for easy readability. 
- -Example environment info: -``` -llama.cpp$ git log | head -1 -commit 2af23d30434a677c6416812eea52ccc0af65119c - -llama.cpp$ lscpu | egrep "AMD|Flags" -Vendor ID: AuthenticAMD -Model name: AMD Ryzen Threadripper 1950X 16-Core Processor -Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid amd_dcm aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb hw_pstate ssbd ibpb vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 xsaves clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif overflow_recov succor smca sme sev -Virtualization: AMD-V - -llama.cpp$ python3 --version -Python 3.10.9 - -llama.cpp$ pip list | egrep "torch|numpy|sentencepiece" -numpy 1.24.2 -numpydoc 1.5.0 -sentencepiece 0.1.97 -torch 1.13.1 -torchvision 0.14.1 - -llama.cpp$ make --version | head -1 -GNU Make 4.3 - -$ md5sum ./models/65B/ggml-model-q4_0.bin -dbdd682cce80e2d6e93cefc7449df487 ./models/65B/ggml-model-q4_0.bin -``` - -Example run with the Linux command [perf](https://www.brendangregg.com/perf.html) -``` -llama.cpp$ perf stat ./main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p "Please close your issue when it has been answered." -main: seed = 1679149377 -llama_model_load: loading model from './models/65B/ggml-model-q4_0.bin' - please wait ... -llama_model_load: n_vocab = 32000 -llama_model_load: n_ctx = 512 -llama_model_load: n_embd = 8192 -llama_model_load: n_mult = 256 -llama_model_load: n_head = 64 -llama_model_load: n_layer = 80 -llama_model_load: n_rot = 128 -llama_model_load: f16 = 2 -llama_model_load: n_ff = 22016 -llama_model_load: n_parts = 8 -llama_model_load: ggml ctx size = 41477.73 MB -llama_model_load: memory_size = 2560.00 MB, n_mem = 40960 -llama_model_load: loading model part 1/8 from './models/65B/ggml-model-q4_0.bin' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 2/8 from './models/65B/ggml-model-q4_0.bin.1' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 3/8 from './models/65B/ggml-model-q4_0.bin.2' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 4/8 from './models/65B/ggml-model-q4_0.bin.3' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 5/8 from './models/65B/ggml-model-q4_0.bin.4' -llama_model_load: .......................................................................................... 
done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 6/8 from './models/65B/ggml-model-q4_0.bin.5' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 7/8 from './models/65B/ggml-model-q4_0.bin.6' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 -llama_model_load: loading model part 8/8 from './models/65B/ggml-model-q4_0.bin.7' -llama_model_load: .......................................................................................... done -llama_model_load: model size = 4869.09 MB / num tensors = 723 - -system_info: n_threads = 16 / 32 | AVX = 1 | AVX2 = 1 | AVX512 = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | VSX = 0 | - -main: prompt: 'Please close your issue when it has been answered.' -main: number of tokens in prompt = 11 - 1 -> '' - 12148 -> 'Please' - 3802 -> ' close' - 596 -> ' your' - 2228 -> ' issue' - 746 -> ' when' - 372 -> ' it' - 756 -> ' has' - 1063 -> ' been' - 7699 -> ' answered' - 29889 -> '.' - -sampling parameters: temp = 0.800000, top_k = 40, top_p = 0.950000, repeat_last_n = 64, repeat_penalty = 1.300000 - - -Please close your issue when it has been answered. -@duncan-donut: I'm trying to figure out what kind of "support" you need for this script and why, exactly? Is there a question about how the code works that hasn't already been addressed in one or more comments below this ticket, or are we talking something else entirely like some sorta bugfixing job because your server setup is different from mine?? -I can understand if your site needs to be running smoothly and you need help with a fix of sorts but there should really be nothing wrong here that the code itself could not handle. And given that I'm getting reports about how it works perfectly well on some other servers, what exactly are we talking? A detailed report will do wonders in helping us get this resolved for ya quickly so please take your time and describe the issue(s) you see as clearly & concisely as possible!! -@duncan-donut: I'm not sure if you have access to cPanel but you could try these instructions. It is worth a shot! Let me know how it goes (or what error message, exactly!) when/if ya give that code a go? 
[end of text] - - -main: mem per token = 71159620 bytes -main: load time = 19309.95 ms -main: sample time = 168.62 ms -main: predict time = 223895.61 ms / 888.47 ms per token -main: total time = 246406.42 ms - - Performance counter stats for './main -m ./models/65B/ggml-model-q4_0.bin -t 16 -n 1024 -p Please close your issue when it has been answered.': - - 3636882.89 msec task-clock # 14.677 CPUs utilized - 13509 context-switches # 3.714 /sec - 2436 cpu-migrations # 0.670 /sec - 10476679 page-faults # 2.881 K/sec - 13133115082869 cycles # 3.611 GHz (16.77%) - 29314462753 stalled-cycles-frontend # 0.22% frontend cycles idle (16.76%) - 10294402631459 stalled-cycles-backend # 78.39% backend cycles idle (16.74%) - 23479217109614 instructions # 1.79 insn per cycle - # 0.44 stalled cycles per insn (16.76%) - 2353072268027 branches # 647.002 M/sec (16.77%) - 1998682780 branch-misses # 0.08% of all branches (16.76%) - - 247.802177522 seconds time elapsed - - 3618.573072000 seconds user - 18.491698000 seconds sys -``` diff --git a/spaces/Illumotion/Koboldcpp/examples/server/json.hpp b/spaces/Illumotion/Koboldcpp/examples/server/json.hpp deleted file mode 100644 index 4d1a37ad7cb874769d911fc3362d567da3aca291..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/examples/server/json.hpp +++ /dev/null @@ -1,24596 +0,0 @@ -// __ _____ _____ _____ -// __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.2 -// |_____|_____|_____|_|___| https://github.com/nlohmann/json -// -// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann -// SPDX-License-Identifier: MIT - -/****************************************************************************\ - * Note on documentation: The source files contain links to the online * - * documentation of the public API at https://json.nlohmann.me. This URL * - * contains the most recent documentation and should also be applicable to * - * previous versions; documentation for deprecated functions is not * - * removed, but marked deprecated. See "Generate documentation" section in * - * file docs/README.md. 
* -\****************************************************************************/ - -#ifndef INCLUDE_NLOHMANN_JSON_HPP_ -#define INCLUDE_NLOHMANN_JSON_HPP_ - -#include // all_of, find, for_each -#include // nullptr_t, ptrdiff_t, size_t -#include // hash, less -#include // initializer_list -#ifndef JSON_NO_IO - #include // istream, ostream -#endif // JSON_NO_IO -#include // random_access_iterator_tag -#include // unique_ptr -#include // accumulate -#include // string, stoi, to_string -#include // declval, forward, move, pair, swap -#include // vector - -// #include -// __ _____ _____ _____ -// __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.2 -// |_____|_____|_____|_|___| https://github.com/nlohmann/json -// -// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann -// SPDX-License-Identifier: MIT - - - -#include - -// #include -// __ _____ _____ _____ -// __| | __| | | | JSON for Modern C++ -// | | |__ | | | | | | version 3.11.2 -// |_____|_____|_____|_|___| https://github.com/nlohmann/json -// -// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann -// SPDX-License-Identifier: MIT - - - -// This file contains all macro definitions affecting or depending on the ABI - -#ifndef JSON_SKIP_LIBRARY_VERSION_CHECK - #if defined(NLOHMANN_JSON_VERSION_MAJOR) && defined(NLOHMANN_JSON_VERSION_MINOR) && defined(NLOHMANN_JSON_VERSION_PATCH) - #if NLOHMANN_JSON_VERSION_MAJOR != 3 || NLOHMANN_JSON_VERSION_MINOR != 11 || NLOHMANN_JSON_VERSION_PATCH != 2 - #warning "Already included a different version of the library!" - #endif - #endif -#endif - -#define NLOHMANN_JSON_VERSION_MAJOR 3 // NOLINT(modernize-macro-to-enum) -#define NLOHMANN_JSON_VERSION_MINOR 11 // NOLINT(modernize-macro-to-enum) -#define NLOHMANN_JSON_VERSION_PATCH 2 // NOLINT(modernize-macro-to-enum) - -#ifndef JSON_DIAGNOSTICS - #define JSON_DIAGNOSTICS 0 -#endif - -#ifndef JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON - #define JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON 0 -#endif - -#if JSON_DIAGNOSTICS - #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS _diag -#else - #define NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS -#endif - -#if JSON_USE_LEGACY_DISCARDED_VALUE_COMPARISON - #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON _ldvcmp -#else - #define NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON -#endif - -#ifndef NLOHMANN_JSON_NAMESPACE_NO_VERSION - #define NLOHMANN_JSON_NAMESPACE_NO_VERSION 0 -#endif - -// Construct the namespace ABI tags component -#define NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) json_abi ## a ## b -#define NLOHMANN_JSON_ABI_TAGS_CONCAT(a, b) \ - NLOHMANN_JSON_ABI_TAGS_CONCAT_EX(a, b) - -#define NLOHMANN_JSON_ABI_TAGS \ - NLOHMANN_JSON_ABI_TAGS_CONCAT( \ - NLOHMANN_JSON_ABI_TAG_DIAGNOSTICS, \ - NLOHMANN_JSON_ABI_TAG_LEGACY_DISCARDED_VALUE_COMPARISON) - -// Construct the namespace version component -#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) \ - _v ## major ## _ ## minor ## _ ## patch -#define NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(major, minor, patch) \ - NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT_EX(major, minor, patch) - -#if NLOHMANN_JSON_NAMESPACE_NO_VERSION -#define NLOHMANN_JSON_NAMESPACE_VERSION -#else -#define NLOHMANN_JSON_NAMESPACE_VERSION \ - NLOHMANN_JSON_NAMESPACE_VERSION_CONCAT(NLOHMANN_JSON_VERSION_MAJOR, \ - NLOHMANN_JSON_VERSION_MINOR, \ - NLOHMANN_JSON_VERSION_PATCH) -#endif - -// Combine namespace components -#define NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) a ## b -#define NLOHMANN_JSON_NAMESPACE_CONCAT(a, b) \ - NLOHMANN_JSON_NAMESPACE_CONCAT_EX(a, b) - 
-#ifndef NLOHMANN_JSON_NAMESPACE
-#define NLOHMANN_JSON_NAMESPACE \
-    nlohmann::NLOHMANN_JSON_NAMESPACE_CONCAT( \
-        NLOHMANN_JSON_ABI_TAGS, \
-        NLOHMANN_JSON_NAMESPACE_VERSION)
-#endif
-
-#ifndef NLOHMANN_JSON_NAMESPACE_BEGIN
-#define NLOHMANN_JSON_NAMESPACE_BEGIN \
-    namespace nlohmann \
-    { \
-    inline namespace NLOHMANN_JSON_NAMESPACE_CONCAT( \
-                NLOHMANN_JSON_ABI_TAGS, \
-                NLOHMANN_JSON_NAMESPACE_VERSION) \
-    {
-#endif
-
-#ifndef NLOHMANN_JSON_NAMESPACE_END
-#define NLOHMANN_JSON_NAMESPACE_END \
-    }  /* namespace (inline namespace) NOLINT(readability/namespace) */ \
-    }  // namespace nlohmann
-#endif
-
-// #include <nlohmann/detail/conversions/from_json.hpp>
-//     __ _____ _____ _____
-//  __|  |   __|   |   | JSON for Modern C++
-// |  |  |__   |  |  | | version 3.11.2
-// |_____|_____|_____|_|___| https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann <https://nlohmann.me>
-// SPDX-License-Identifier: MIT
-
-
-
-#include <algorithm> // transform
-#include <array> // array
-#include <forward_list> // forward_list
-#include <iterator> // inserter, front_inserter, end
-#include <map> // map
-#include <string> // string
-#include <tuple> // tuple, make_tuple
-#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
-#include <unordered_map> // unordered_map
-#include <utility> // pair, declval
-#include <valarray> // valarray
-
-// #include <nlohmann/detail/exceptions.hpp>
-//     __ _____ _____ _____
-//  __|  |   __|   |   | JSON for Modern C++
-// |  |  |__   |  |  | | version 3.11.2
-// |_____|_____|_____|_|___| https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann <https://nlohmann.me>
-// SPDX-License-Identifier: MIT
-
-
-
-#include <cstddef> // nullptr_t
-#include <exception> // exception
-#include <stdexcept> // runtime_error
-#include <string> // to_string
-#include <vector> // vector
-
-// #include <nlohmann/detail/value_t.hpp>
-//     __ _____ _____ _____
-//  __|  |   __|   |   | JSON for Modern C++
-// |  |  |__   |  |  | | version 3.11.2
-// |_____|_____|_____|_|___| https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann <https://nlohmann.me>
-// SPDX-License-Identifier: MIT
-
-
-
-#include <array> // array
-#include <cstddef> // size_t
-#include <cstdint> // uint8_t
-#include <string> // string
-
-// #include <nlohmann/detail/macro_scope.hpp>
-//     __ _____ _____ _____
-//  __|  |   __|   |   | JSON for Modern C++
-// |  |  |__   |  |  | | version 3.11.2
-// |_____|_____|_____|_|___| https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann <https://nlohmann.me>
-// SPDX-License-Identifier: MIT
-
-
-
-#include <utility> // declval, pair
-// #include <nlohmann/detail/meta/detected.hpp>
-//     __ _____ _____ _____
-//  __|  |   __|   |   | JSON for Modern C++
-// |  |  |__   |  |  | | version 3.11.2
-// |_____|_____|_____|_|___| https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann <https://nlohmann.me>
-// SPDX-License-Identifier: MIT
-
-
-
-#include <type_traits>
-
-// #include <nlohmann/detail/meta/void_t.hpp>
-//     __ _____ _____ _____
-//  __|  |   __|   |   | JSON for Modern C++
-// |  |  |__   |  |  | | version 3.11.2
-// |_____|_____|_____|_|___| https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann <https://nlohmann.me>
-// SPDX-License-Identifier: MIT
-
-
-
-// #include <nlohmann/detail/abi_macros.hpp>
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-namespace detail
-{
-
-template<typename ...Ts> struct make_void
-{
-    using type = void;
-};
-template<typename ...Ts> using void_t = typename make_void<Ts...>::type;
-
-} // namespace detail
-NLOHMANN_JSON_NAMESPACE_END
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-namespace detail
-{
-
-// https://en.cppreference.com/w/cpp/experimental/is_detected
-struct nonesuch
-{
-    nonesuch() = delete;
-    ~nonesuch() = delete;
-    nonesuch(nonesuch const&) = delete;
-    nonesuch(nonesuch const&&) = delete;
-    void operator=(nonesuch const&) = delete;
-    void operator=(nonesuch&&) = delete;
-};
-
-template<class Default,
-         class AlwaysVoid,
-         template<class...> class Op,
-         class... Args>
-struct detector
-{
-    using value_t = std::false_type;
-    using type = Default;
-};
-
-template<class Default, template<class...> class Op, class... Args>
-struct detector<Default, void_t<Op<Args...>>, Op, Args...>
-{
-    using value_t = std::true_type;
-    using type = Op<Args...>;
-};
-
-template<template<class...> class Op, class... Args>
-using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;
-
-template<template<class...> class Op, class... Args>
-struct is_detected_lazy : is_detected<Op, Args...> { };
-
-template<template<class...> class Op, class... Args>
-using detected_t = typename detector<nonesuch, void, Op, Args...>::type;
-
-template<class Default, template<class...> class Op, class... Args>
-using detected_or = detector<Default, void, Op, Args...>;
-
-template<class Default, template<class...> class Op, class... Args>
-using detected_or_t = typename detected_or<Default, Op, Args...>::type;
-
-template<class Expected, template<class...> class Op, class... Args>
-using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;
-
-template<class To, template<class...> class Op, class... Args>
-using is_detected_convertible =
-    std::is_convertible<detected_t<Op, Args...>, To>;
-
-} // namespace detail
-NLOHMANN_JSON_NAMESPACE_END
-
-// #include <nlohmann/thirdparty/hedley/hedley.hpp>
-
-
-//     __ _____ _____ _____
-//  __|  |   __|   |   | JSON for Modern C++
-// |  |  |__   |  |  | | version 3.11.2
-// |_____|_____|_____|_|___| https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann <https://nlohmann.me>
-// SPDX-FileCopyrightText: 2016-2021 Evan Nemerson <evan@nemerson.com>
-// SPDX-License-Identifier: MIT
-
-/* Hedley - https://nemequ.github.io/hedley
- * Created by Evan Nemerson <evan@nemerson.com>
- */
-
-#if !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < 15)
-#if defined(JSON_HEDLEY_VERSION)
-    #undef JSON_HEDLEY_VERSION
-#endif
-#define JSON_HEDLEY_VERSION 15
-
-#if defined(JSON_HEDLEY_STRINGIFY_EX)
-    #undef JSON_HEDLEY_STRINGIFY_EX
-#endif
-#define JSON_HEDLEY_STRINGIFY_EX(x) #x
-
-#if defined(JSON_HEDLEY_STRINGIFY)
-    #undef JSON_HEDLEY_STRINGIFY
-#endif
-#define JSON_HEDLEY_STRINGIFY(x) JSON_HEDLEY_STRINGIFY_EX(x)
-
-#if defined(JSON_HEDLEY_CONCAT_EX)
-    #undef JSON_HEDLEY_CONCAT_EX
-#endif
-#define JSON_HEDLEY_CONCAT_EX(a,b) a##b
-
-#if defined(JSON_HEDLEY_CONCAT)
-    #undef JSON_HEDLEY_CONCAT
-#endif
-#define JSON_HEDLEY_CONCAT(a,b) JSON_HEDLEY_CONCAT_EX(a,b)
-
-#if defined(JSON_HEDLEY_CONCAT3_EX)
-    #undef JSON_HEDLEY_CONCAT3_EX
-#endif
-#define JSON_HEDLEY_CONCAT3_EX(a,b,c) a##b##c
-
-#if defined(JSON_HEDLEY_CONCAT3)
-    #undef JSON_HEDLEY_CONCAT3
-#endif
-#define JSON_HEDLEY_CONCAT3(a,b,c) JSON_HEDLEY_CONCAT3_EX(a,b,c)
-
-#if defined(JSON_HEDLEY_VERSION_ENCODE)
-    #undef JSON_HEDLEY_VERSION_ENCODE
-#endif
-#define JSON_HEDLEY_VERSION_ENCODE(major,minor,revision) (((major) * 1000000) + ((minor) * 1000) + (revision))
-
-#if defined(JSON_HEDLEY_VERSION_DECODE_MAJOR)
-    #undef JSON_HEDLEY_VERSION_DECODE_MAJOR
-#endif
-#define JSON_HEDLEY_VERSION_DECODE_MAJOR(version) ((version) / 1000000)
-
-#if defined(JSON_HEDLEY_VERSION_DECODE_MINOR)
-    #undef JSON_HEDLEY_VERSION_DECODE_MINOR
-#endif
-#define JSON_HEDLEY_VERSION_DECODE_MINOR(version) (((version) % 1000000) / 1000)
-
-#if defined(JSON_HEDLEY_VERSION_DECODE_REVISION)
-    #undef JSON_HEDLEY_VERSION_DECODE_REVISION
-#endif
-#define JSON_HEDLEY_VERSION_DECODE_REVISION(version) ((version) % 1000)
-
-#if defined(JSON_HEDLEY_GNUC_VERSION)
-    #undef JSON_HEDLEY_GNUC_VERSION
-#endif
-#if defined(__GNUC__) && defined(__GNUC_PATCHLEVEL__)
-    #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
-#elif defined(__GNUC__)
-    #define JSON_HEDLEY_GNUC_VERSION JSON_HEDLEY_VERSION_ENCODE(__GNUC__, __GNUC_MINOR__, 0)
-#endif
-
-#if defined(JSON_HEDLEY_GNUC_VERSION_CHECK)
-    #undef JSON_HEDLEY_GNUC_VERSION_CHECK
-#endif
-#if defined(JSON_HEDLEY_GNUC_VERSION)
-    #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GNUC_VERSION >=
JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_MSVC_VERSION) - #undef JSON_HEDLEY_MSVC_VERSION -#endif -#if defined(_MSC_FULL_VER) && (_MSC_FULL_VER >= 140000000) && !defined(__ICL) - #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 10000000, (_MSC_FULL_VER % 10000000) / 100000, (_MSC_FULL_VER % 100000) / 100) -#elif defined(_MSC_FULL_VER) && !defined(__ICL) - #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_FULL_VER / 1000000, (_MSC_FULL_VER % 1000000) / 10000, (_MSC_FULL_VER % 10000) / 10) -#elif defined(_MSC_VER) && !defined(__ICL) - #define JSON_HEDLEY_MSVC_VERSION JSON_HEDLEY_VERSION_ENCODE(_MSC_VER / 100, _MSC_VER % 100, 0) -#endif - -#if defined(JSON_HEDLEY_MSVC_VERSION_CHECK) - #undef JSON_HEDLEY_MSVC_VERSION_CHECK -#endif -#if !defined(JSON_HEDLEY_MSVC_VERSION) - #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (0) -#elif defined(_MSC_VER) && (_MSC_VER >= 1400) - #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 10000000) + (minor * 100000) + (patch))) -#elif defined(_MSC_VER) && (_MSC_VER >= 1200) - #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_FULL_VER >= ((major * 1000000) + (minor * 10000) + (patch))) -#else - #define JSON_HEDLEY_MSVC_VERSION_CHECK(major,minor,patch) (_MSC_VER >= ((major * 100) + (minor))) -#endif - -#if defined(JSON_HEDLEY_INTEL_VERSION) - #undef JSON_HEDLEY_INTEL_VERSION -#endif -#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && !defined(__ICL) - #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, __INTEL_COMPILER_UPDATE) -#elif defined(__INTEL_COMPILER) && !defined(__ICL) - #define JSON_HEDLEY_INTEL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER / 100, __INTEL_COMPILER % 100, 0) -#endif - -#if defined(JSON_HEDLEY_INTEL_VERSION_CHECK) - #undef JSON_HEDLEY_INTEL_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_INTEL_VERSION) - #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_INTEL_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_INTEL_CL_VERSION) - #undef JSON_HEDLEY_INTEL_CL_VERSION -#endif -#if defined(__INTEL_COMPILER) && defined(__INTEL_COMPILER_UPDATE) && defined(__ICL) - #define JSON_HEDLEY_INTEL_CL_VERSION JSON_HEDLEY_VERSION_ENCODE(__INTEL_COMPILER, __INTEL_COMPILER_UPDATE, 0) -#endif - -#if defined(JSON_HEDLEY_INTEL_CL_VERSION_CHECK) - #undef JSON_HEDLEY_INTEL_CL_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_INTEL_CL_VERSION) - #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_INTEL_CL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_INTEL_CL_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_PGI_VERSION) - #undef JSON_HEDLEY_PGI_VERSION -#endif -#if defined(__PGI) && defined(__PGIC__) && defined(__PGIC_MINOR__) && defined(__PGIC_PATCHLEVEL__) - #define JSON_HEDLEY_PGI_VERSION JSON_HEDLEY_VERSION_ENCODE(__PGIC__, __PGIC_MINOR__, __PGIC_PATCHLEVEL__) -#endif - -#if defined(JSON_HEDLEY_PGI_VERSION_CHECK) - #undef JSON_HEDLEY_PGI_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_PGI_VERSION) - #define JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PGI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define 
JSON_HEDLEY_PGI_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_SUNPRO_VERSION) - #undef JSON_HEDLEY_SUNPRO_VERSION -#endif -#if defined(__SUNPRO_C) && (__SUNPRO_C > 0x1000) - #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_C >> 16) & 0xf) * 10) + ((__SUNPRO_C >> 12) & 0xf), (((__SUNPRO_C >> 8) & 0xf) * 10) + ((__SUNPRO_C >> 4) & 0xf), (__SUNPRO_C & 0xf) * 10) -#elif defined(__SUNPRO_C) - #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_C >> 8) & 0xf, (__SUNPRO_C >> 4) & 0xf, (__SUNPRO_C) & 0xf) -#elif defined(__SUNPRO_CC) && (__SUNPRO_CC > 0x1000) - #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((((__SUNPRO_CC >> 16) & 0xf) * 10) + ((__SUNPRO_CC >> 12) & 0xf), (((__SUNPRO_CC >> 8) & 0xf) * 10) + ((__SUNPRO_CC >> 4) & 0xf), (__SUNPRO_CC & 0xf) * 10) -#elif defined(__SUNPRO_CC) - #define JSON_HEDLEY_SUNPRO_VERSION JSON_HEDLEY_VERSION_ENCODE((__SUNPRO_CC >> 8) & 0xf, (__SUNPRO_CC >> 4) & 0xf, (__SUNPRO_CC) & 0xf) -#endif - -#if defined(JSON_HEDLEY_SUNPRO_VERSION_CHECK) - #undef JSON_HEDLEY_SUNPRO_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_SUNPRO_VERSION) - #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_SUNPRO_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_SUNPRO_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) - #undef JSON_HEDLEY_EMSCRIPTEN_VERSION -#endif -#if defined(__EMSCRIPTEN__) - #define JSON_HEDLEY_EMSCRIPTEN_VERSION JSON_HEDLEY_VERSION_ENCODE(__EMSCRIPTEN_major__, __EMSCRIPTEN_minor__, __EMSCRIPTEN_tiny__) -#endif - -#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK) - #undef JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_EMSCRIPTEN_VERSION) - #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_EMSCRIPTEN_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_EMSCRIPTEN_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_ARM_VERSION) - #undef JSON_HEDLEY_ARM_VERSION -#endif -#if defined(__CC_ARM) && defined(__ARMCOMPILER_VERSION) - #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCOMPILER_VERSION / 1000000, (__ARMCOMPILER_VERSION % 1000000) / 10000, (__ARMCOMPILER_VERSION % 10000) / 100) -#elif defined(__CC_ARM) && defined(__ARMCC_VERSION) - #define JSON_HEDLEY_ARM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ARMCC_VERSION / 1000000, (__ARMCC_VERSION % 1000000) / 10000, (__ARMCC_VERSION % 10000) / 100) -#endif - -#if defined(JSON_HEDLEY_ARM_VERSION_CHECK) - #undef JSON_HEDLEY_ARM_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_ARM_VERSION) - #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_ARM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_ARM_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_IBM_VERSION) - #undef JSON_HEDLEY_IBM_VERSION -#endif -#if defined(__ibmxl__) - #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__ibmxl_version__, __ibmxl_release__, __ibmxl_modification__) -#elif defined(__xlC__) && defined(__xlC_ver__) - #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, (__xlC_ver__ >> 8) & 0xff) -#elif defined(__xlC__) - #define JSON_HEDLEY_IBM_VERSION JSON_HEDLEY_VERSION_ENCODE(__xlC__ >> 8, __xlC__ & 0xff, 0) -#endif - -#if defined(JSON_HEDLEY_IBM_VERSION_CHECK) - #undef JSON_HEDLEY_IBM_VERSION_CHECK -#endif -#if 
defined(JSON_HEDLEY_IBM_VERSION) - #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IBM_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_IBM_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_VERSION) - #undef JSON_HEDLEY_TI_VERSION -#endif -#if \ - defined(__TI_COMPILER_VERSION__) && \ - ( \ - defined(__TMS470__) || defined(__TI_ARM__) || \ - defined(__MSP430__) || \ - defined(__TMS320C2000__) \ - ) -#if (__TI_COMPILER_VERSION__ >= 16000000) - #define JSON_HEDLEY_TI_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif -#endif - -#if defined(JSON_HEDLEY_TI_VERSION_CHECK) - #undef JSON_HEDLEY_TI_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_VERSION) - #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CL2000_VERSION) - #undef JSON_HEDLEY_TI_CL2000_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C2000__) - #define JSON_HEDLEY_TI_CL2000_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CL2000_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CL2000_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CL2000_VERSION) - #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL2000_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CL2000_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CL430_VERSION) - #undef JSON_HEDLEY_TI_CL430_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__MSP430__) - #define JSON_HEDLEY_TI_CL430_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CL430_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CL430_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CL430_VERSION) - #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL430_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CL430_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) - #undef JSON_HEDLEY_TI_ARMCL_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && (defined(__TMS470__) || defined(__TI_ARM__)) - #define JSON_HEDLEY_TI_ARMCL_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_ARMCL_VERSION_CHECK) - #undef JSON_HEDLEY_TI_ARMCL_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_ARMCL_VERSION) - #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_ARMCL_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CL6X_VERSION) - #undef JSON_HEDLEY_TI_CL6X_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__TMS320C6X__) - #define JSON_HEDLEY_TI_CL6X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, 
(__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CL6X_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CL6X_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CL6X_VERSION) - #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL6X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CL6X_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CL7X_VERSION) - #undef JSON_HEDLEY_TI_CL7X_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__C7000__) - #define JSON_HEDLEY_TI_CL7X_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CL7X_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CL7X_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CL7X_VERSION) - #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CL7X_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CL7X_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) - #undef JSON_HEDLEY_TI_CLPRU_VERSION -#endif -#if defined(__TI_COMPILER_VERSION__) && defined(__PRU__) - #define JSON_HEDLEY_TI_CLPRU_VERSION JSON_HEDLEY_VERSION_ENCODE(__TI_COMPILER_VERSION__ / 1000000, (__TI_COMPILER_VERSION__ % 1000000) / 1000, (__TI_COMPILER_VERSION__ % 1000)) -#endif - -#if defined(JSON_HEDLEY_TI_CLPRU_VERSION_CHECK) - #undef JSON_HEDLEY_TI_CLPRU_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TI_CLPRU_VERSION) - #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TI_CLPRU_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_CRAY_VERSION) - #undef JSON_HEDLEY_CRAY_VERSION -#endif -#if defined(_CRAYC) - #if defined(_RELEASE_PATCHLEVEL) - #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, _RELEASE_PATCHLEVEL) - #else - #define JSON_HEDLEY_CRAY_VERSION JSON_HEDLEY_VERSION_ENCODE(_RELEASE_MAJOR, _RELEASE_MINOR, 0) - #endif -#endif - -#if defined(JSON_HEDLEY_CRAY_VERSION_CHECK) - #undef JSON_HEDLEY_CRAY_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_CRAY_VERSION) - #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_CRAY_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_CRAY_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_IAR_VERSION) - #undef JSON_HEDLEY_IAR_VERSION -#endif -#if defined(__IAR_SYSTEMS_ICC__) - #if __VER__ > 1000 - #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE((__VER__ / 1000000), ((__VER__ / 1000) % 1000), (__VER__ % 1000)) - #else - #define JSON_HEDLEY_IAR_VERSION JSON_HEDLEY_VERSION_ENCODE(__VER__ / 100, __VER__ % 100, 0) - #endif -#endif - -#if defined(JSON_HEDLEY_IAR_VERSION_CHECK) - #undef JSON_HEDLEY_IAR_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_IAR_VERSION) - #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_IAR_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_IAR_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_TINYC_VERSION) - #undef JSON_HEDLEY_TINYC_VERSION -#endif -#if defined(__TINYC__) - #define JSON_HEDLEY_TINYC_VERSION JSON_HEDLEY_VERSION_ENCODE(__TINYC__ / 1000, (__TINYC__ / 100) % 10, __TINYC__ % 100) -#endif - -#if 
defined(JSON_HEDLEY_TINYC_VERSION_CHECK) - #undef JSON_HEDLEY_TINYC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_TINYC_VERSION) - #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_TINYC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_TINYC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_DMC_VERSION) - #undef JSON_HEDLEY_DMC_VERSION -#endif -#if defined(__DMC__) - #define JSON_HEDLEY_DMC_VERSION JSON_HEDLEY_VERSION_ENCODE(__DMC__ >> 8, (__DMC__ >> 4) & 0xf, __DMC__ & 0xf) -#endif - -#if defined(JSON_HEDLEY_DMC_VERSION_CHECK) - #undef JSON_HEDLEY_DMC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_DMC_VERSION) - #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_DMC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_DMC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_COMPCERT_VERSION) - #undef JSON_HEDLEY_COMPCERT_VERSION -#endif -#if defined(__COMPCERT_VERSION__) - #define JSON_HEDLEY_COMPCERT_VERSION JSON_HEDLEY_VERSION_ENCODE(__COMPCERT_VERSION__ / 10000, (__COMPCERT_VERSION__ / 100) % 100, __COMPCERT_VERSION__ % 100) -#endif - -#if defined(JSON_HEDLEY_COMPCERT_VERSION_CHECK) - #undef JSON_HEDLEY_COMPCERT_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_COMPCERT_VERSION) - #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_COMPCERT_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_COMPCERT_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_PELLES_VERSION) - #undef JSON_HEDLEY_PELLES_VERSION -#endif -#if defined(__POCC__) - #define JSON_HEDLEY_PELLES_VERSION JSON_HEDLEY_VERSION_ENCODE(__POCC__ / 100, __POCC__ % 100, 0) -#endif - -#if defined(JSON_HEDLEY_PELLES_VERSION_CHECK) - #undef JSON_HEDLEY_PELLES_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_PELLES_VERSION) - #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_PELLES_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_PELLES_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_MCST_LCC_VERSION) - #undef JSON_HEDLEY_MCST_LCC_VERSION -#endif -#if defined(__LCC__) && defined(__LCC_MINOR__) - #define JSON_HEDLEY_MCST_LCC_VERSION JSON_HEDLEY_VERSION_ENCODE(__LCC__ / 100, __LCC__ % 100, __LCC_MINOR__) -#endif - -#if defined(JSON_HEDLEY_MCST_LCC_VERSION_CHECK) - #undef JSON_HEDLEY_MCST_LCC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_MCST_LCC_VERSION) - #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_MCST_LCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_MCST_LCC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_GCC_VERSION) - #undef JSON_HEDLEY_GCC_VERSION -#endif -#if \ - defined(JSON_HEDLEY_GNUC_VERSION) && \ - !defined(__clang__) && \ - !defined(JSON_HEDLEY_INTEL_VERSION) && \ - !defined(JSON_HEDLEY_PGI_VERSION) && \ - !defined(JSON_HEDLEY_ARM_VERSION) && \ - !defined(JSON_HEDLEY_CRAY_VERSION) && \ - !defined(JSON_HEDLEY_TI_VERSION) && \ - !defined(JSON_HEDLEY_TI_ARMCL_VERSION) && \ - !defined(JSON_HEDLEY_TI_CL430_VERSION) && \ - !defined(JSON_HEDLEY_TI_CL2000_VERSION) && \ - !defined(JSON_HEDLEY_TI_CL6X_VERSION) && \ - !defined(JSON_HEDLEY_TI_CL7X_VERSION) && \ - !defined(JSON_HEDLEY_TI_CLPRU_VERSION) && \ - !defined(__COMPCERT__) && \ - !defined(JSON_HEDLEY_MCST_LCC_VERSION) - #define JSON_HEDLEY_GCC_VERSION 
JSON_HEDLEY_GNUC_VERSION -#endif - -#if defined(JSON_HEDLEY_GCC_VERSION_CHECK) - #undef JSON_HEDLEY_GCC_VERSION_CHECK -#endif -#if defined(JSON_HEDLEY_GCC_VERSION) - #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (JSON_HEDLEY_GCC_VERSION >= JSON_HEDLEY_VERSION_ENCODE(major, minor, patch)) -#else - #define JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) (0) -#endif - -#if defined(JSON_HEDLEY_HAS_ATTRIBUTE) - #undef JSON_HEDLEY_HAS_ATTRIBUTE -#endif -#if \ - defined(__has_attribute) && \ - ( \ - (!defined(JSON_HEDLEY_IAR_VERSION) || JSON_HEDLEY_IAR_VERSION_CHECK(8,5,9)) \ - ) -# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) __has_attribute(attribute) -#else -# define JSON_HEDLEY_HAS_ATTRIBUTE(attribute) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_ATTRIBUTE) - #undef JSON_HEDLEY_GNUC_HAS_ATTRIBUTE -#endif -#if defined(__has_attribute) - #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) -#else - #define JSON_HEDLEY_GNUC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_ATTRIBUTE) - #undef JSON_HEDLEY_GCC_HAS_ATTRIBUTE -#endif -#if defined(__has_attribute) - #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) -#else - #define JSON_HEDLEY_GCC_HAS_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE -#endif -#if \ - defined(__has_cpp_attribute) && \ - defined(__cplusplus) && \ - (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) __has_cpp_attribute(attribute) -#else - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) (0) -#endif - -#if defined(JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS) - #undef JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS -#endif -#if !defined(__cplusplus) || !defined(__has_cpp_attribute) - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) -#elif \ - !defined(JSON_HEDLEY_PGI_VERSION) && \ - !defined(JSON_HEDLEY_IAR_VERSION) && \ - (!defined(JSON_HEDLEY_SUNPRO_VERSION) || JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0)) && \ - (!defined(JSON_HEDLEY_MSVC_VERSION) || JSON_HEDLEY_MSVC_VERSION_CHECK(19,20,0)) - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(ns::attribute) -#else - #define JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(ns,attribute) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE -#endif -#if defined(__has_cpp_attribute) && defined(__cplusplus) - #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) -#else - #define JSON_HEDLEY_GNUC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE -#endif -#if defined(__has_cpp_attribute) && defined(__cplusplus) - #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) __has_cpp_attribute(attribute) -#else - #define JSON_HEDLEY_GCC_HAS_CPP_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_BUILTIN) - #undef JSON_HEDLEY_HAS_BUILTIN -#endif -#if defined(__has_builtin) - #define JSON_HEDLEY_HAS_BUILTIN(builtin) __has_builtin(builtin) -#else - #define JSON_HEDLEY_HAS_BUILTIN(builtin) 
(0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_BUILTIN) - #undef JSON_HEDLEY_GNUC_HAS_BUILTIN -#endif -#if defined(__has_builtin) - #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) -#else - #define JSON_HEDLEY_GNUC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_BUILTIN) - #undef JSON_HEDLEY_GCC_HAS_BUILTIN -#endif -#if defined(__has_builtin) - #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) __has_builtin(builtin) -#else - #define JSON_HEDLEY_GCC_HAS_BUILTIN(builtin,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_FEATURE) - #undef JSON_HEDLEY_HAS_FEATURE -#endif -#if defined(__has_feature) - #define JSON_HEDLEY_HAS_FEATURE(feature) __has_feature(feature) -#else - #define JSON_HEDLEY_HAS_FEATURE(feature) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_FEATURE) - #undef JSON_HEDLEY_GNUC_HAS_FEATURE -#endif -#if defined(__has_feature) - #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) -#else - #define JSON_HEDLEY_GNUC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_FEATURE) - #undef JSON_HEDLEY_GCC_HAS_FEATURE -#endif -#if defined(__has_feature) - #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) __has_feature(feature) -#else - #define JSON_HEDLEY_GCC_HAS_FEATURE(feature,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_EXTENSION) - #undef JSON_HEDLEY_HAS_EXTENSION -#endif -#if defined(__has_extension) - #define JSON_HEDLEY_HAS_EXTENSION(extension) __has_extension(extension) -#else - #define JSON_HEDLEY_HAS_EXTENSION(extension) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_EXTENSION) - #undef JSON_HEDLEY_GNUC_HAS_EXTENSION -#endif -#if defined(__has_extension) - #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) -#else - #define JSON_HEDLEY_GNUC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_EXTENSION) - #undef JSON_HEDLEY_GCC_HAS_EXTENSION -#endif -#if defined(__has_extension) - #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) __has_extension(extension) -#else - #define JSON_HEDLEY_GCC_HAS_EXTENSION(extension,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE -#endif -#if defined(__has_declspec_attribute) - #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) __has_declspec_attribute(attribute) -#else - #define JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE -#endif -#if defined(__has_declspec_attribute) - #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) -#else - #define JSON_HEDLEY_GNUC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE -#endif -#if defined(__has_declspec_attribute) - #define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) __has_declspec_attribute(attribute) -#else - 
#define JSON_HEDLEY_GCC_HAS_DECLSPEC_ATTRIBUTE(attribute,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_HAS_WARNING) - #undef JSON_HEDLEY_HAS_WARNING -#endif -#if defined(__has_warning) - #define JSON_HEDLEY_HAS_WARNING(warning) __has_warning(warning) -#else - #define JSON_HEDLEY_HAS_WARNING(warning) (0) -#endif - -#if defined(JSON_HEDLEY_GNUC_HAS_WARNING) - #undef JSON_HEDLEY_GNUC_HAS_WARNING -#endif -#if defined(__has_warning) - #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) -#else - #define JSON_HEDLEY_GNUC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GNUC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_GCC_HAS_WARNING) - #undef JSON_HEDLEY_GCC_HAS_WARNING -#endif -#if defined(__has_warning) - #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) __has_warning(warning) -#else - #define JSON_HEDLEY_GCC_HAS_WARNING(warning,major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ - defined(__clang__) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,0,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,17) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(8,0,0) || \ - (JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) && defined(__C99_PRAGMA_OPERATOR)) - #define JSON_HEDLEY_PRAGMA(value) _Pragma(#value) -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_PRAGMA(value) __pragma(value) -#else - #define JSON_HEDLEY_PRAGMA(value) -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_PUSH) - #undef JSON_HEDLEY_DIAGNOSTIC_PUSH -#endif -#if defined(JSON_HEDLEY_DIAGNOSTIC_POP) - #undef JSON_HEDLEY_DIAGNOSTIC_POP -#endif -#if defined(__clang__) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH __pragma(warning(push)) - #define JSON_HEDLEY_DIAGNOSTIC_POP __pragma(warning(pop)) -#elif JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("push") - #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("pop") -#elif \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,4,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("diag_push") - #define 
JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("diag_pop")
-#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0)
-    #define JSON_HEDLEY_DIAGNOSTIC_PUSH _Pragma("warning(push)")
-    #define JSON_HEDLEY_DIAGNOSTIC_POP _Pragma("warning(pop)")
-#else
-    #define JSON_HEDLEY_DIAGNOSTIC_PUSH
-    #define JSON_HEDLEY_DIAGNOSTIC_POP
-#endif
-
-/* JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_ is for
-   HEDLEY INTERNAL USE ONLY.  API subject to change without notice. */
-#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
-    #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_
-#endif
-#if defined(__cplusplus)
-#  if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat")
-#    if JSON_HEDLEY_HAS_WARNING("-Wc++17-extensions")
-#      if JSON_HEDLEY_HAS_WARNING("-Wc++1z-extensions")
-#        define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
-    _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
-    _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
-    _Pragma("clang diagnostic ignored \"-Wc++1z-extensions\"") \
-    xpr \
-    JSON_HEDLEY_DIAGNOSTIC_POP
-#      else
-#        define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
-    _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
-    _Pragma("clang diagnostic ignored \"-Wc++17-extensions\"") \
-    xpr \
-    JSON_HEDLEY_DIAGNOSTIC_POP
-#      endif
-#    else
-#      define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(xpr) \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
-    _Pragma("clang diagnostic ignored \"-Wc++98-compat\"") \
-    xpr \
-    JSON_HEDLEY_DIAGNOSTIC_POP
-#    endif
-#  endif
-#endif
-#if !defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_)
-    #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(x) x
-#endif
-
-#if defined(JSON_HEDLEY_CONST_CAST)
-    #undef JSON_HEDLEY_CONST_CAST
-#endif
-#if defined(__cplusplus)
-#  define JSON_HEDLEY_CONST_CAST(T, expr) (const_cast<T>(expr))
-#elif \
-    JSON_HEDLEY_HAS_WARNING("-Wcast-qual") || \
-    JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) || \
-    JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0)
-#  define JSON_HEDLEY_CONST_CAST(T, expr) (__extension__ ({ \
-        JSON_HEDLEY_DIAGNOSTIC_PUSH \
-        JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL \
-        ((T) (expr)); \
-        JSON_HEDLEY_DIAGNOSTIC_POP \
-    }))
-#else
-#  define JSON_HEDLEY_CONST_CAST(T, expr) ((T) (expr))
-#endif
-
-#if defined(JSON_HEDLEY_REINTERPRET_CAST)
-    #undef JSON_HEDLEY_REINTERPRET_CAST
-#endif
-#if defined(__cplusplus)
-    #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) (reinterpret_cast<T>(expr))
-#else
-    #define JSON_HEDLEY_REINTERPRET_CAST(T, expr) ((T) (expr))
-#endif
-
-#if defined(JSON_HEDLEY_STATIC_CAST)
-    #undef JSON_HEDLEY_STATIC_CAST
-#endif
-#if defined(__cplusplus)
-    #define JSON_HEDLEY_STATIC_CAST(T, expr) (static_cast<T>(expr))
-#else
-    #define JSON_HEDLEY_STATIC_CAST(T, expr) ((T) (expr))
-#endif
-
-#if defined(JSON_HEDLEY_CPP_CAST)
-    #undef JSON_HEDLEY_CPP_CAST
-#endif
-#if defined(__cplusplus)
-#  if JSON_HEDLEY_HAS_WARNING("-Wold-style-cast")
-#    define JSON_HEDLEY_CPP_CAST(T, expr) \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
-    _Pragma("clang diagnostic ignored \"-Wold-style-cast\"") \
-    ((T) (expr)) \
-    JSON_HEDLEY_DIAGNOSTIC_POP
-#  elif JSON_HEDLEY_IAR_VERSION_CHECK(8,3,0)
-#    define JSON_HEDLEY_CPP_CAST(T, expr) \
-    JSON_HEDLEY_DIAGNOSTIC_PUSH \
-    _Pragma("diag_suppress=Pe137") \
-    JSON_HEDLEY_DIAGNOSTIC_POP
-#  else
-#    define JSON_HEDLEY_CPP_CAST(T, expr) ((T) (expr))
-#  endif
-#else
-#  define JSON_HEDLEY_CPP_CAST(T, expr) (expr)
-#endif
-
-#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED)
-    #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED
-#endif
-#if JSON_HEDLEY_HAS_WARNING("-Wdeprecated-declarations") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warning(disable:1478 1786)") -#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:1478 1786)) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1216,1444,1445") -#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED __pragma(warning(disable:4996)) -#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1215,1444") -#elif \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress 1291,1718") -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && !defined(__cplusplus) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,E_DEPRECATED_ATT,E_DEPRECATED_ATT_MESS)") -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) && defined(__cplusplus) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("error_messages(off,symdeprecated,symdeprecated2)") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("diag_suppress=Pe1444,Pe1215") -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,90,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED _Pragma("warn(disable:2241)") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("clang diagnostic ignored \"-Wunknown-pragmas\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("warning(disable:161)") -#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:161)) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 1675") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("GCC diagnostic ignored \"-Wunknown-pragmas\"") -#elif 
JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS __pragma(warning(disable:4068)) -#elif \ - JSON_HEDLEY_TI_VERSION_CHECK(16,9,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") -#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 163") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress=Pe161") -#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS _Pragma("diag_suppress 161") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunknown-attributes") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("clang diagnostic ignored \"-Wunknown-attributes\"") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(4,6,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("warning(disable:1292)") -#elif JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:1292)) -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES __pragma(warning(disable:5030)) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(20,7,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097,1098") -#elif JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("error_messages(off,attrskipunsup)") -#elif \ - JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1173") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress=Pe1097") -#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES _Pragma("diag_suppress 1097") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_CPP_ATTRIBUTES -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL) - #undef JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wcast-qual") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("clang diagnostic ignored \"-Wcast-qual\"") -#elif JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("warning(disable:2203 2331)") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL _Pragma("GCC diagnostic ignored \"-Wcast-qual\"") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_CAST_QUAL -#endif - -#if defined(JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION) - #undef 
JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunused-function") - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("clang diagnostic ignored \"-Wunused-function\"") -#elif JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("GCC diagnostic ignored \"-Wunused-function\"") -#elif JSON_HEDLEY_MSVC_VERSION_CHECK(1,0,0) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION __pragma(warning(disable:4505)) -#elif JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION _Pragma("diag_suppress 3142") -#else - #define JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNUSED_FUNCTION -#endif - -#if defined(JSON_HEDLEY_DEPRECATED) - #undef JSON_HEDLEY_DEPRECATED -#endif -#if defined(JSON_HEDLEY_DEPRECATED_FOR) - #undef JSON_HEDLEY_DEPRECATED_FOR -#endif -#if \ - JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated("Since " # since)) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated("Since " #since "; use " #replacement)) -#elif \ - (JSON_HEDLEY_HAS_EXTENSION(attribute_deprecated_with_message) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,13,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(18,1,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(18,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,3,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__("Since " #since))) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__("Since " #since "; use " #replacement))) -#elif defined(__cplusplus) && (__cplusplus >= 201402L) - #define JSON_HEDLEY_DEPRECATED(since) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since)]]) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[deprecated("Since " #since "; use " #replacement)]]) -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(deprecated) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) - #define JSON_HEDLEY_DEPRECATED(since) __attribute__((__deprecated__)) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __attribute__((__deprecated__)) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - 
JSON_HEDLEY_PELLES_VERSION_CHECK(6,50,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_DEPRECATED(since) __declspec(deprecated) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) __declspec(deprecated) -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_DEPRECATED(since) _Pragma("deprecated") - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) _Pragma("deprecated") -#else - #define JSON_HEDLEY_DEPRECATED(since) - #define JSON_HEDLEY_DEPRECATED_FOR(since, replacement) -#endif - -#if defined(JSON_HEDLEY_UNAVAILABLE) - #undef JSON_HEDLEY_UNAVAILABLE -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(warning) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,3,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_UNAVAILABLE(available_since) __attribute__((__warning__("Not available until " #available_since))) -#else - #define JSON_HEDLEY_UNAVAILABLE(available_since) -#endif - -#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT) - #undef JSON_HEDLEY_WARN_UNUSED_RESULT -#endif -#if defined(JSON_HEDLEY_WARN_UNUSED_RESULT_MSG) - #undef JSON_HEDLEY_WARN_UNUSED_RESULT_MSG -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(warn_unused_result) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) __attribute__((__warn_unused_result__)) -#elif (JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) >= 201907L) - #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard(msg)]]) -#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(nodiscard) - #define JSON_HEDLEY_WARN_UNUSED_RESULT JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[nodiscard]]) -#elif defined(_Check_return_) /* SAL */ - #define JSON_HEDLEY_WARN_UNUSED_RESULT _Check_return_ - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) _Check_return_ -#else - #define JSON_HEDLEY_WARN_UNUSED_RESULT - #define JSON_HEDLEY_WARN_UNUSED_RESULT_MSG(msg) -#endif - -#if defined(JSON_HEDLEY_SENTINEL) - #undef JSON_HEDLEY_SENTINEL -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(sentinel) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_SENTINEL(position) 
__attribute__((__sentinel__(position))) -#else - #define JSON_HEDLEY_SENTINEL(position) -#endif - -#if defined(JSON_HEDLEY_NO_RETURN) - #undef JSON_HEDLEY_NO_RETURN -#endif -#if JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_NO_RETURN __noreturn -#elif \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) -#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L - #define JSON_HEDLEY_NO_RETURN _Noreturn -#elif defined(__cplusplus) && (__cplusplus >= 201103L) - #define JSON_HEDLEY_NO_RETURN JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[noreturn]]) -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(noreturn) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,2,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) - #define JSON_HEDLEY_NO_RETURN __attribute__((__noreturn__)) -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) - #define JSON_HEDLEY_NO_RETURN _Pragma("does_not_return") -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) -#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) - #define JSON_HEDLEY_NO_RETURN _Pragma("FUNC_NEVER_RETURNS;") -#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) - #define JSON_HEDLEY_NO_RETURN __attribute((noreturn)) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) - #define JSON_HEDLEY_NO_RETURN __declspec(noreturn) -#else - #define JSON_HEDLEY_NO_RETURN -#endif - -#if defined(JSON_HEDLEY_NO_ESCAPE) - #undef JSON_HEDLEY_NO_ESCAPE -#endif -#if JSON_HEDLEY_HAS_ATTRIBUTE(noescape) - #define JSON_HEDLEY_NO_ESCAPE __attribute__((__noescape__)) -#else - #define JSON_HEDLEY_NO_ESCAPE -#endif - -#if defined(JSON_HEDLEY_UNREACHABLE) - #undef JSON_HEDLEY_UNREACHABLE -#endif -#if defined(JSON_HEDLEY_UNREACHABLE_RETURN) - #undef JSON_HEDLEY_UNREACHABLE_RETURN -#endif -#if defined(JSON_HEDLEY_ASSUME) - #undef JSON_HEDLEY_ASSUME -#endif -#if \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_ASSUME(expr) __assume(expr) -#elif JSON_HEDLEY_HAS_BUILTIN(__builtin_assume) - #define JSON_HEDLEY_ASSUME(expr) __builtin_assume(expr) -#elif \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) - #if defined(__cplusplus) - #define JSON_HEDLEY_ASSUME(expr) std::_nassert(expr) - #else - #define JSON_HEDLEY_ASSUME(expr) _nassert(expr) - #endif -#endif -#if \ - (JSON_HEDLEY_HAS_BUILTIN(__builtin_unreachable) && (!defined(JSON_HEDLEY_ARM_VERSION))) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,5,0) || \ - 
JSON_HEDLEY_PGI_VERSION_CHECK(18,10,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,5) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(10,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_UNREACHABLE() __builtin_unreachable() -#elif defined(JSON_HEDLEY_ASSUME) - #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) -#endif -#if !defined(JSON_HEDLEY_ASSUME) - #if defined(JSON_HEDLEY_UNREACHABLE) - #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, ((expr) ? 1 : (JSON_HEDLEY_UNREACHABLE(), 1))) - #else - #define JSON_HEDLEY_ASSUME(expr) JSON_HEDLEY_STATIC_CAST(void, expr) - #endif -#endif -#if defined(JSON_HEDLEY_UNREACHABLE) - #if \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (JSON_HEDLEY_STATIC_CAST(void, JSON_HEDLEY_ASSUME(0)), (value)) - #else - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) JSON_HEDLEY_UNREACHABLE() - #endif -#else - #define JSON_HEDLEY_UNREACHABLE_RETURN(value) return (value) -#endif -#if !defined(JSON_HEDLEY_UNREACHABLE) - #define JSON_HEDLEY_UNREACHABLE() JSON_HEDLEY_ASSUME(0) -#endif - -JSON_HEDLEY_DIAGNOSTIC_PUSH -#if JSON_HEDLEY_HAS_WARNING("-Wpedantic") - #pragma clang diagnostic ignored "-Wpedantic" -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wc++98-compat-pedantic") && defined(__cplusplus) - #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" -#endif -#if JSON_HEDLEY_GCC_HAS_WARNING("-Wvariadic-macros",4,0,0) - #if defined(__clang__) - #pragma clang diagnostic ignored "-Wvariadic-macros" - #elif defined(JSON_HEDLEY_GCC_VERSION) - #pragma GCC diagnostic ignored "-Wvariadic-macros" - #endif -#endif -#if defined(JSON_HEDLEY_NON_NULL) - #undef JSON_HEDLEY_NON_NULL -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(nonnull) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) - #define JSON_HEDLEY_NON_NULL(...) __attribute__((__nonnull__(__VA_ARGS__))) -#else - #define JSON_HEDLEY_NON_NULL(...) 
-#endif -JSON_HEDLEY_DIAGNOSTIC_POP - -#if defined(JSON_HEDLEY_PRINTF_FORMAT) - #undef JSON_HEDLEY_PRINTF_FORMAT -#endif -#if defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && !defined(__USE_MINGW_ANSI_STDIO) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(ms_printf, string_idx, first_to_check))) -#elif defined(__MINGW32__) && JSON_HEDLEY_GCC_HAS_ATTRIBUTE(format,4,4,0) && defined(__USE_MINGW_ANSI_STDIO) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(gnu_printf, string_idx, first_to_check))) -#elif \ - JSON_HEDLEY_HAS_ATTRIBUTE(format) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,6,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __attribute__((__format__(__printf__, string_idx, first_to_check))) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(6,0,0) - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) __declspec(vaformat(printf,string_idx,first_to_check)) -#else - #define JSON_HEDLEY_PRINTF_FORMAT(string_idx,first_to_check) -#endif - -#if defined(JSON_HEDLEY_CONSTEXPR) - #undef JSON_HEDLEY_CONSTEXPR -#endif -#if defined(__cplusplus) - #if __cplusplus >= 201103L - #define JSON_HEDLEY_CONSTEXPR JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(constexpr) - #endif -#endif -#if !defined(JSON_HEDLEY_CONSTEXPR) - #define JSON_HEDLEY_CONSTEXPR -#endif - -#if defined(JSON_HEDLEY_PREDICT) - #undef JSON_HEDLEY_PREDICT -#endif -#if defined(JSON_HEDLEY_LIKELY) - #undef JSON_HEDLEY_LIKELY -#endif -#if defined(JSON_HEDLEY_UNLIKELY) - #undef JSON_HEDLEY_UNLIKELY -#endif -#if defined(JSON_HEDLEY_UNPREDICTABLE) - #undef JSON_HEDLEY_UNPREDICTABLE -#endif -#if JSON_HEDLEY_HAS_BUILTIN(__builtin_unpredictable) - #define JSON_HEDLEY_UNPREDICTABLE(expr) __builtin_unpredictable((expr)) -#endif -#if \ - (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect_with_probability) && !defined(JSON_HEDLEY_PGI_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(9,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) -# define JSON_HEDLEY_PREDICT(expr, value, probability) __builtin_expect_with_probability( (expr), (value), (probability)) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) __builtin_expect_with_probability(!!(expr), 1 , (probability)) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) __builtin_expect_with_probability(!!(expr), 0 , (probability)) -# define JSON_HEDLEY_LIKELY(expr) __builtin_expect (!!(expr), 1 ) -# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect (!!(expr), 0 ) -#elif \ - (JSON_HEDLEY_HAS_BUILTIN(__builtin_expect) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,0,0) || \ - 
JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,15,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,7,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,27) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) -# define JSON_HEDLEY_PREDICT(expr, expected, probability) \ - (((probability) >= 0.9) ? __builtin_expect((expr), (expected)) : (JSON_HEDLEY_STATIC_CAST(void, expected), (expr))) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) \ - (__extension__ ({ \ - double hedley_probability_ = (probability); \ - ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 1) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 0) : !!(expr))); \ - })) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) \ - (__extension__ ({ \ - double hedley_probability_ = (probability); \ - ((hedley_probability_ >= 0.9) ? __builtin_expect(!!(expr), 0) : ((hedley_probability_ <= 0.1) ? __builtin_expect(!!(expr), 1) : !!(expr))); \ - })) -# define JSON_HEDLEY_LIKELY(expr) __builtin_expect(!!(expr), 1) -# define JSON_HEDLEY_UNLIKELY(expr) __builtin_expect(!!(expr), 0) -#else -# define JSON_HEDLEY_PREDICT(expr, expected, probability) (JSON_HEDLEY_STATIC_CAST(void, expected), (expr)) -# define JSON_HEDLEY_PREDICT_TRUE(expr, probability) (!!(expr)) -# define JSON_HEDLEY_PREDICT_FALSE(expr, probability) (!!(expr)) -# define JSON_HEDLEY_LIKELY(expr) (!!(expr)) -# define JSON_HEDLEY_UNLIKELY(expr) (!!(expr)) -#endif -#if !defined(JSON_HEDLEY_UNPREDICTABLE) - #define JSON_HEDLEY_UNPREDICTABLE(expr) JSON_HEDLEY_PREDICT(expr, 1, 0.5) -#endif - -#if defined(JSON_HEDLEY_MALLOC) - #undef JSON_HEDLEY_MALLOC -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(malloc) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_MALLOC __attribute__((__malloc__)) -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) - #define JSON_HEDLEY_MALLOC _Pragma("returns_new_memory") -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_MALLOC __declspec(restrict) -#else - #define JSON_HEDLEY_MALLOC -#endif - -#if defined(JSON_HEDLEY_PURE) - #undef JSON_HEDLEY_PURE -#endif -#if \ 
- JSON_HEDLEY_HAS_ATTRIBUTE(pure) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(2,96,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) -# define JSON_HEDLEY_PURE __attribute__((__pure__)) -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) -# define JSON_HEDLEY_PURE _Pragma("does_not_write_global_data") -#elif defined(__cplusplus) && \ - ( \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(2,0,1) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) \ - ) -# define JSON_HEDLEY_PURE _Pragma("FUNC_IS_PURE;") -#else -# define JSON_HEDLEY_PURE -#endif - -#if defined(JSON_HEDLEY_CONST) - #undef JSON_HEDLEY_CONST -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(const) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(2,5,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_CONST __attribute__((__const__)) -#elif \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) - #define JSON_HEDLEY_CONST _Pragma("no_side_effect") -#else - #define JSON_HEDLEY_CONST JSON_HEDLEY_PURE -#endif - -#if defined(JSON_HEDLEY_RESTRICT) - #undef JSON_HEDLEY_RESTRICT -#endif -#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus) - #define JSON_HEDLEY_RESTRICT restrict -#elif \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_MSVC_VERSION_CHECK(14,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(17,10,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,4) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,1,0) 
|| \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,14,0) && defined(__cplusplus)) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) || \ - defined(__clang__) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_RESTRICT __restrict -#elif JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,3,0) && !defined(__cplusplus) - #define JSON_HEDLEY_RESTRICT _Restrict -#else - #define JSON_HEDLEY_RESTRICT -#endif - -#if defined(JSON_HEDLEY_INLINE) - #undef JSON_HEDLEY_INLINE -#endif -#if \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) || \ - (defined(__cplusplus) && (__cplusplus >= 199711L)) - #define JSON_HEDLEY_INLINE inline -#elif \ - defined(JSON_HEDLEY_GCC_VERSION) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(6,2,0) - #define JSON_HEDLEY_INLINE __inline__ -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,1,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(3,1,0) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,2,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(8,0,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_INLINE __inline -#else - #define JSON_HEDLEY_INLINE -#endif - -#if defined(JSON_HEDLEY_ALWAYS_INLINE) - #undef JSON_HEDLEY_ALWAYS_INLINE -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(always_inline) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) -# define JSON_HEDLEY_ALWAYS_INLINE __attribute__((__always_inline__)) JSON_HEDLEY_INLINE -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(12,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) -# define JSON_HEDLEY_ALWAYS_INLINE __forceinline -#elif defined(__cplusplus) && \ - ( \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) \ - ) -# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("FUNC_ALWAYS_INLINE;") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) -# define JSON_HEDLEY_ALWAYS_INLINE _Pragma("inline=forced") -#else -# define JSON_HEDLEY_ALWAYS_INLINE JSON_HEDLEY_INLINE -#endif - -#if defined(JSON_HEDLEY_NEVER_INLINE) - #undef JSON_HEDLEY_NEVER_INLINE -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(noinline) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,0,0) || \ - 
JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(10,1,0) || \ - JSON_HEDLEY_TI_VERSION_CHECK(15,12,0) || \ - (JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(4,8,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_ARMCL_VERSION_CHECK(5,2,0) || \ - (JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL2000_VERSION_CHECK(6,4,0) || \ - (JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,0,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(4,3,0) || \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) || \ - JSON_HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ - JSON_HEDLEY_TI_CLPRU_VERSION_CHECK(2,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ - JSON_HEDLEY_IAR_VERSION_CHECK(8,10,0) - #define JSON_HEDLEY_NEVER_INLINE __attribute__((__noinline__)) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,10,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) -#elif JSON_HEDLEY_PGI_VERSION_CHECK(10,2,0) - #define JSON_HEDLEY_NEVER_INLINE _Pragma("noinline") -#elif JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,0,0) && defined(__cplusplus) - #define JSON_HEDLEY_NEVER_INLINE _Pragma("FUNC_CANNOT_INLINE;") -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) - #define JSON_HEDLEY_NEVER_INLINE _Pragma("inline=never") -#elif JSON_HEDLEY_COMPCERT_VERSION_CHECK(3,2,0) - #define JSON_HEDLEY_NEVER_INLINE __attribute((noinline)) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(9,0,0) - #define JSON_HEDLEY_NEVER_INLINE __declspec(noinline) -#else - #define JSON_HEDLEY_NEVER_INLINE -#endif - -#if defined(JSON_HEDLEY_PRIVATE) - #undef JSON_HEDLEY_PRIVATE -#endif -#if defined(JSON_HEDLEY_PUBLIC) - #undef JSON_HEDLEY_PUBLIC -#endif -#if defined(JSON_HEDLEY_IMPORT) - #undef JSON_HEDLEY_IMPORT -#endif -#if defined(_WIN32) || defined(__CYGWIN__) -# define JSON_HEDLEY_PRIVATE -# define JSON_HEDLEY_PUBLIC __declspec(dllexport) -# define JSON_HEDLEY_IMPORT __declspec(dllimport) -#else -# if \ - JSON_HEDLEY_HAS_ATTRIBUTE(visibility) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ - JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,11,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ - ( \ - defined(__TI_EABI__) && \ - ( \ - (JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,2,0) && defined(__TI_GNU_ATTRIBUTE_SUPPORT__)) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(7,5,0) \ - ) \ - ) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) -# define JSON_HEDLEY_PRIVATE __attribute__((__visibility__("hidden"))) -# define JSON_HEDLEY_PUBLIC __attribute__((__visibility__("default"))) -# else -# define JSON_HEDLEY_PRIVATE -# define JSON_HEDLEY_PUBLIC -# endif -# define JSON_HEDLEY_IMPORT extern -#endif - -#if defined(JSON_HEDLEY_NO_THROW) - #undef JSON_HEDLEY_NO_THROW -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(nothrow) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,3,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_NO_THROW __attribute__((__nothrow__)) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(13,1,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) - #define JSON_HEDLEY_NO_THROW __declspec(nothrow) -#else - #define JSON_HEDLEY_NO_THROW -#endif - -#if 
defined(JSON_HEDLEY_FALL_THROUGH) - #undef JSON_HEDLEY_FALL_THROUGH -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(fallthrough) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(7,0,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_FALL_THROUGH __attribute__((__fallthrough__)) -#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE_NS(clang,fallthrough) - #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[clang::fallthrough]]) -#elif JSON_HEDLEY_HAS_CPP_ATTRIBUTE(fallthrough) - #define JSON_HEDLEY_FALL_THROUGH JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_([[fallthrough]]) -#elif defined(__fallthrough) /* SAL */ - #define JSON_HEDLEY_FALL_THROUGH __fallthrough -#else - #define JSON_HEDLEY_FALL_THROUGH -#endif - -#if defined(JSON_HEDLEY_RETURNS_NON_NULL) - #undef JSON_HEDLEY_RETURNS_NON_NULL -#endif -#if \ - JSON_HEDLEY_HAS_ATTRIBUTE(returns_nonnull) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_RETURNS_NON_NULL __attribute__((__returns_nonnull__)) -#elif defined(_Ret_notnull_) /* SAL */ - #define JSON_HEDLEY_RETURNS_NON_NULL _Ret_notnull_ -#else - #define JSON_HEDLEY_RETURNS_NON_NULL -#endif - -#if defined(JSON_HEDLEY_ARRAY_PARAM) - #undef JSON_HEDLEY_ARRAY_PARAM -#endif -#if \ - defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) && \ - !defined(__STDC_NO_VLA__) && \ - !defined(__cplusplus) && \ - !defined(JSON_HEDLEY_PGI_VERSION) && \ - !defined(JSON_HEDLEY_TINYC_VERSION) - #define JSON_HEDLEY_ARRAY_PARAM(name) (name) -#else - #define JSON_HEDLEY_ARRAY_PARAM(name) -#endif - -#if defined(JSON_HEDLEY_IS_CONSTANT) - #undef JSON_HEDLEY_IS_CONSTANT -#endif -#if defined(JSON_HEDLEY_REQUIRE_CONSTEXPR) - #undef JSON_HEDLEY_REQUIRE_CONSTEXPR -#endif -/* JSON_HEDLEY_IS_CONSTEXPR_ is for - HEDLEY INTERNAL USE ONLY. API subject to change without notice. */ -#if defined(JSON_HEDLEY_IS_CONSTEXPR_) - #undef JSON_HEDLEY_IS_CONSTEXPR_ -#endif -#if \ - JSON_HEDLEY_HAS_BUILTIN(__builtin_constant_p) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,19) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(4,1,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ - JSON_HEDLEY_TI_CL6X_VERSION_CHECK(6,1,0) || \ - (JSON_HEDLEY_SUNPRO_VERSION_CHECK(5,10,0) && !defined(__cplusplus)) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) - #define JSON_HEDLEY_IS_CONSTANT(expr) __builtin_constant_p(expr) -#endif -#if !defined(__cplusplus) -# if \ - JSON_HEDLEY_HAS_BUILTIN(__builtin_types_compatible_p) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(3,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(13,1,0) || \ - JSON_HEDLEY_CRAY_VERSION_CHECK(8,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,4,0) || \ - JSON_HEDLEY_TINYC_VERSION_CHECK(0,9,24) -#if defined(__INTPTR_TYPE__) - #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0)), int*) -#else - #include <stdint.h> - #define JSON_HEDLEY_IS_CONSTEXPR_(expr) __builtin_types_compatible_p(__typeof__((1 ?
(void*) ((intptr_t) ((expr) * 0)) : (int*) 0)), int*) -#endif -# elif \ - ( \ - defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && \ - !defined(JSON_HEDLEY_SUNPRO_VERSION) && \ - !defined(JSON_HEDLEY_PGI_VERSION) && \ - !defined(JSON_HEDLEY_IAR_VERSION)) || \ - (JSON_HEDLEY_HAS_EXTENSION(c_generic_selections) && !defined(JSON_HEDLEY_IAR_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,9,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(17,0,0) || \ - JSON_HEDLEY_IBM_VERSION_CHECK(12,1,0) || \ - JSON_HEDLEY_ARM_VERSION_CHECK(5,3,0) -#if defined(__INTPTR_TYPE__) - #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((__INTPTR_TYPE__) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0) -#else - #include <stdint.h> - #define JSON_HEDLEY_IS_CONSTEXPR_(expr) _Generic((1 ? (void*) ((intptr_t) ((expr) * 0)) : (int*) 0), int*: 1, void*: 0) -#endif -# elif \ - defined(JSON_HEDLEY_GCC_VERSION) || \ - defined(JSON_HEDLEY_INTEL_VERSION) || \ - defined(JSON_HEDLEY_TINYC_VERSION) || \ - defined(JSON_HEDLEY_TI_ARMCL_VERSION) || \ - JSON_HEDLEY_TI_CL430_VERSION_CHECK(18,12,0) || \ - defined(JSON_HEDLEY_TI_CL2000_VERSION) || \ - defined(JSON_HEDLEY_TI_CL6X_VERSION) || \ - defined(JSON_HEDLEY_TI_CL7X_VERSION) || \ - defined(JSON_HEDLEY_TI_CLPRU_VERSION) || \ - defined(__clang__) -# define JSON_HEDLEY_IS_CONSTEXPR_(expr) ( \ - sizeof(void) != \ - sizeof(*( \ - 1 ? \ - ((void*) ((expr) * 0L) ) : \ -((struct { char v[sizeof(void) * 2]; } *) 1) \ - ) \ - ) \ - ) -# endif -#endif -#if defined(JSON_HEDLEY_IS_CONSTEXPR_) - #if !defined(JSON_HEDLEY_IS_CONSTANT) - #define JSON_HEDLEY_IS_CONSTANT(expr) JSON_HEDLEY_IS_CONSTEXPR_(expr) - #endif - #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (JSON_HEDLEY_IS_CONSTEXPR_(expr) ? (expr) : (-1)) -#else - #if !defined(JSON_HEDLEY_IS_CONSTANT) - #define JSON_HEDLEY_IS_CONSTANT(expr) (0) - #endif - #define JSON_HEDLEY_REQUIRE_CONSTEXPR(expr) (expr) -#endif - -#if defined(JSON_HEDLEY_BEGIN_C_DECLS) - #undef JSON_HEDLEY_BEGIN_C_DECLS -#endif -#if defined(JSON_HEDLEY_END_C_DECLS) - #undef JSON_HEDLEY_END_C_DECLS -#endif -#if defined(JSON_HEDLEY_C_DECL) - #undef JSON_HEDLEY_C_DECL -#endif -#if defined(__cplusplus) - #define JSON_HEDLEY_BEGIN_C_DECLS extern "C" { - #define JSON_HEDLEY_END_C_DECLS } - #define JSON_HEDLEY_C_DECL extern "C" -#else - #define JSON_HEDLEY_BEGIN_C_DECLS - #define JSON_HEDLEY_END_C_DECLS - #define JSON_HEDLEY_C_DECL -#endif - -#if defined(JSON_HEDLEY_STATIC_ASSERT) - #undef JSON_HEDLEY_STATIC_ASSERT -#endif -#if \ - !defined(__cplusplus) && ( \ - (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) || \ - (JSON_HEDLEY_HAS_FEATURE(c_static_assert) && !defined(JSON_HEDLEY_INTEL_CL_VERSION)) || \ - JSON_HEDLEY_GCC_VERSION_CHECK(6,0,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) || \ - defined(_Static_assert) \ - ) -# define JSON_HEDLEY_STATIC_ASSERT(expr, message) _Static_assert(expr, message) -#elif \ - (defined(__cplusplus) && (__cplusplus >= 201103L)) || \ - JSON_HEDLEY_MSVC_VERSION_CHECK(16,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) -# define JSON_HEDLEY_STATIC_ASSERT(expr, message) JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(static_assert(expr, message)) -#else -# define JSON_HEDLEY_STATIC_ASSERT(expr, message) -#endif - -#if defined(JSON_HEDLEY_NULL) - #undef JSON_HEDLEY_NULL -#endif -#if defined(__cplusplus) - #if __cplusplus >= 201103L - #define JSON_HEDLEY_NULL JSON_HEDLEY_DIAGNOSTIC_DISABLE_CPP98_COMPAT_WRAP_(nullptr) - #elif defined(NULL) - #define JSON_HEDLEY_NULL NULL - #else - #define JSON_HEDLEY_NULL
JSON_HEDLEY_STATIC_CAST(void*, 0) - #endif -#elif defined(NULL) - #define JSON_HEDLEY_NULL NULL -#else - #define JSON_HEDLEY_NULL ((void*) 0) -#endif - -#if defined(JSON_HEDLEY_MESSAGE) - #undef JSON_HEDLEY_MESSAGE -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") -# define JSON_HEDLEY_MESSAGE(msg) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ - JSON_HEDLEY_PRAGMA(message msg) \ - JSON_HEDLEY_DIAGNOSTIC_POP -#elif \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) -# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message msg) -#elif JSON_HEDLEY_CRAY_VERSION_CHECK(5,0,0) -# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(_CRI message msg) -#elif JSON_HEDLEY_IAR_VERSION_CHECK(8,0,0) -# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) -#elif JSON_HEDLEY_PELLES_VERSION_CHECK(2,0,0) -# define JSON_HEDLEY_MESSAGE(msg) JSON_HEDLEY_PRAGMA(message(msg)) -#else -# define JSON_HEDLEY_MESSAGE(msg) -#endif - -#if defined(JSON_HEDLEY_WARNING) - #undef JSON_HEDLEY_WARNING -#endif -#if JSON_HEDLEY_HAS_WARNING("-Wunknown-pragmas") -# define JSON_HEDLEY_WARNING(msg) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - JSON_HEDLEY_DIAGNOSTIC_DISABLE_UNKNOWN_PRAGMAS \ - JSON_HEDLEY_PRAGMA(clang warning msg) \ - JSON_HEDLEY_DIAGNOSTIC_POP -#elif \ - JSON_HEDLEY_GCC_VERSION_CHECK(4,8,0) || \ - JSON_HEDLEY_PGI_VERSION_CHECK(18,4,0) || \ - JSON_HEDLEY_INTEL_VERSION_CHECK(13,0,0) -# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(GCC warning msg) -#elif \ - JSON_HEDLEY_MSVC_VERSION_CHECK(15,0,0) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) -# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_PRAGMA(message(msg)) -#else -# define JSON_HEDLEY_WARNING(msg) JSON_HEDLEY_MESSAGE(msg) -#endif - -#if defined(JSON_HEDLEY_REQUIRE) - #undef JSON_HEDLEY_REQUIRE -#endif -#if defined(JSON_HEDLEY_REQUIRE_MSG) - #undef JSON_HEDLEY_REQUIRE_MSG -#endif -#if JSON_HEDLEY_HAS_ATTRIBUTE(diagnose_if) -# if JSON_HEDLEY_HAS_WARNING("-Wgcc-compat") -# define JSON_HEDLEY_REQUIRE(expr) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ - __attribute__((diagnose_if(!(expr), #expr, "error"))) \ - JSON_HEDLEY_DIAGNOSTIC_POP -# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("clang diagnostic ignored \"-Wgcc-compat\"") \ - __attribute__((diagnose_if(!(expr), msg, "error"))) \ - JSON_HEDLEY_DIAGNOSTIC_POP -# else -# define JSON_HEDLEY_REQUIRE(expr) __attribute__((diagnose_if(!(expr), #expr, "error"))) -# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) __attribute__((diagnose_if(!(expr), msg, "error"))) -# endif -#else -# define JSON_HEDLEY_REQUIRE(expr) -# define JSON_HEDLEY_REQUIRE_MSG(expr,msg) -#endif - -#if defined(JSON_HEDLEY_FLAGS) - #undef JSON_HEDLEY_FLAGS -#endif -#if JSON_HEDLEY_HAS_ATTRIBUTE(flag_enum) && (!defined(__cplusplus) || JSON_HEDLEY_HAS_WARNING("-Wbitfield-enum-conversion")) - #define JSON_HEDLEY_FLAGS __attribute__((__flag_enum__)) -#else - #define JSON_HEDLEY_FLAGS -#endif - -#if defined(JSON_HEDLEY_FLAGS_CAST) - #undef JSON_HEDLEY_FLAGS_CAST -#endif -#if JSON_HEDLEY_INTEL_VERSION_CHECK(19,0,0) -# define JSON_HEDLEY_FLAGS_CAST(T, expr) (__extension__ ({ \ - JSON_HEDLEY_DIAGNOSTIC_PUSH \ - _Pragma("warning(disable:188)") \ - ((T) (expr)); \ - JSON_HEDLEY_DIAGNOSTIC_POP \ - })) -#else -# define JSON_HEDLEY_FLAGS_CAST(T, expr) JSON_HEDLEY_STATIC_CAST(T, expr) -#endif - -#if defined(JSON_HEDLEY_EMPTY_BASES) - #undef JSON_HEDLEY_EMPTY_BASES -#endif -#if \ - 
(JSON_HEDLEY_MSVC_VERSION_CHECK(19,0,23918) && !JSON_HEDLEY_MSVC_VERSION_CHECK(20,0,0)) || \ - JSON_HEDLEY_INTEL_CL_VERSION_CHECK(2021,1,0) - #define JSON_HEDLEY_EMPTY_BASES __declspec(empty_bases) -#else - #define JSON_HEDLEY_EMPTY_BASES -#endif - -/* Remaining macros are deprecated. */ - -#if defined(JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK) - #undef JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK -#endif -#if defined(__clang__) - #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) (0) -#else - #define JSON_HEDLEY_GCC_NOT_CLANG_VERSION_CHECK(major,minor,patch) JSON_HEDLEY_GCC_VERSION_CHECK(major,minor,patch) -#endif - -#if defined(JSON_HEDLEY_CLANG_HAS_ATTRIBUTE) - #undef JSON_HEDLEY_CLANG_HAS_ATTRIBUTE -#endif -#define JSON_HEDLEY_CLANG_HAS_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_ATTRIBUTE(attribute) - -#if defined(JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE) - #undef JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE -#endif -#define JSON_HEDLEY_CLANG_HAS_CPP_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_CPP_ATTRIBUTE(attribute) - -#if defined(JSON_HEDLEY_CLANG_HAS_BUILTIN) - #undef JSON_HEDLEY_CLANG_HAS_BUILTIN -#endif -#define JSON_HEDLEY_CLANG_HAS_BUILTIN(builtin) JSON_HEDLEY_HAS_BUILTIN(builtin) - -#if defined(JSON_HEDLEY_CLANG_HAS_FEATURE) - #undef JSON_HEDLEY_CLANG_HAS_FEATURE -#endif -#define JSON_HEDLEY_CLANG_HAS_FEATURE(feature) JSON_HEDLEY_HAS_FEATURE(feature) - -#if defined(JSON_HEDLEY_CLANG_HAS_EXTENSION) - #undef JSON_HEDLEY_CLANG_HAS_EXTENSION -#endif -#define JSON_HEDLEY_CLANG_HAS_EXTENSION(extension) JSON_HEDLEY_HAS_EXTENSION(extension) - -#if defined(JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE) - #undef JSON_HEDLEY_CLANG_HAS_DECLSPEC_DECLSPEC_ATTRIBUTE -#endif -#define JSON_HEDLEY_CLANG_HAS_DECLSPEC_ATTRIBUTE(attribute) JSON_HEDLEY_HAS_DECLSPEC_ATTRIBUTE(attribute) - -#if defined(JSON_HEDLEY_CLANG_HAS_WARNING) - #undef JSON_HEDLEY_CLANG_HAS_WARNING -#endif -#define JSON_HEDLEY_CLANG_HAS_WARNING(warning) JSON_HEDLEY_HAS_WARNING(warning) - -#endif /* !defined(JSON_HEDLEY_VERSION) || (JSON_HEDLEY_VERSION < X) */ - - -// This file contains all internal macro definitions (except those affecting ABI) -// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them - -// #include - - -// exclude unsupported compilers -#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK) - #if defined(__clang__) - #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400 - #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers" - #endif - #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER)) - #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800 - #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers" - #endif - #endif -#endif - -// C++ language standard detection -// if the user manually specified the used c++ version this is skipped -#if !defined(JSON_HAS_CPP_20) && !defined(JSON_HAS_CPP_17) && !defined(JSON_HAS_CPP_14) && !defined(JSON_HAS_CPP_11) - #if (defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) - #define JSON_HAS_CPP_20 - #define JSON_HAS_CPP_17 - #define JSON_HAS_CPP_14 - #elif (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464 - #define JSON_HAS_CPP_17 - #define JSON_HAS_CPP_14 - #elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1) - #define JSON_HAS_CPP_14 - #endif - // the cpp 11 flag is 
always specified because it is the minimal required version - #define JSON_HAS_CPP_11 -#endif - -#ifdef __has_include - #if __has_include(<version>) - #include <version> - #endif -#endif - -#if !defined(JSON_HAS_FILESYSTEM) && !defined(JSON_HAS_EXPERIMENTAL_FILESYSTEM) - #ifdef JSON_HAS_CPP_17 - #if defined(__cpp_lib_filesystem) - #define JSON_HAS_FILESYSTEM 1 - #elif defined(__cpp_lib_experimental_filesystem) - #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 - #elif !defined(__has_include) - #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 - #elif __has_include(<filesystem>) - #define JSON_HAS_FILESYSTEM 1 - #elif __has_include(<experimental/filesystem>) - #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 1 - #endif - - // std::filesystem does not work on MinGW GCC 8: https://sourceforge.net/p/mingw-w64/bugs/737/ - #if defined(__MINGW32__) && defined(__GNUC__) && __GNUC__ == 8 - #undef JSON_HAS_FILESYSTEM - #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM - #endif - - // no filesystem support before GCC 8: https://en.cppreference.com/w/cpp/compiler_support - #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 8 - #undef JSON_HAS_FILESYSTEM - #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM - #endif - - // no filesystem support before Clang 7: https://en.cppreference.com/w/cpp/compiler_support - #if defined(__clang_major__) && __clang_major__ < 7 - #undef JSON_HAS_FILESYSTEM - #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM - #endif - - // no filesystem support before MSVC 19.14: https://en.cppreference.com/w/cpp/compiler_support - #if defined(_MSC_VER) && _MSC_VER < 1914 - #undef JSON_HAS_FILESYSTEM - #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM - #endif - - // no filesystem support before iOS 13 - #if defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 130000 - #undef JSON_HAS_FILESYSTEM - #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM - #endif - - // no filesystem support before macOS Catalina - #if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 101500 - #undef JSON_HAS_FILESYSTEM - #undef JSON_HAS_EXPERIMENTAL_FILESYSTEM - #endif - #endif -#endif - -#ifndef JSON_HAS_EXPERIMENTAL_FILESYSTEM - #define JSON_HAS_EXPERIMENTAL_FILESYSTEM 0 -#endif - -#ifndef JSON_HAS_FILESYSTEM - #define JSON_HAS_FILESYSTEM 0 -#endif - -#ifndef JSON_HAS_THREE_WAY_COMPARISON - #if defined(__cpp_impl_three_way_comparison) && __cpp_impl_three_way_comparison >= 201907L \ - && defined(__cpp_lib_three_way_comparison) && __cpp_lib_three_way_comparison >= 201907L - #define JSON_HAS_THREE_WAY_COMPARISON 1 - #else - #define JSON_HAS_THREE_WAY_COMPARISON 0 - #endif -#endif - -#ifndef JSON_HAS_RANGES - // ranges header shipping in GCC 11.1.0 (released 2021-04-27) has syntax error - #if defined(__GLIBCXX__) && __GLIBCXX__ == 20210427 - #define JSON_HAS_RANGES 0 - #elif defined(__cpp_lib_ranges) - #define JSON_HAS_RANGES 1 - #else - #define JSON_HAS_RANGES 0 - #endif -#endif - -#ifdef JSON_HAS_CPP_17 - #define JSON_INLINE_VARIABLE inline -#else - #define JSON_INLINE_VARIABLE -#endif - -#if JSON_HEDLEY_HAS_ATTRIBUTE(no_unique_address) - #define JSON_NO_UNIQUE_ADDRESS [[no_unique_address]] -#else - #define JSON_NO_UNIQUE_ADDRESS -#endif - -// disable documentation warnings on clang -#if defined(__clang__) - #pragma clang diagnostic push - #pragma clang diagnostic ignored "-Wdocumentation" - #pragma clang diagnostic ignored "-Wdocumentation-unknown-command" -#endif - -// allow disabling exceptions -#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION) - #define JSON_THROW(exception) throw exception - #define JSON_TRY
try - #define JSON_CATCH(exception) catch(exception) - #define JSON_INTERNAL_CATCH(exception) catch(exception) -#else - #include <cstdlib> - #define JSON_THROW(exception) std::abort() - #define JSON_TRY if(true) - #define JSON_CATCH(exception) if(false) - #define JSON_INTERNAL_CATCH(exception) if(false) -#endif - -// override exception macros -#if defined(JSON_THROW_USER) - #undef JSON_THROW - #define JSON_THROW JSON_THROW_USER -#endif -#if defined(JSON_TRY_USER) - #undef JSON_TRY - #define JSON_TRY JSON_TRY_USER -#endif -#if defined(JSON_CATCH_USER) - #undef JSON_CATCH - #define JSON_CATCH JSON_CATCH_USER - #undef JSON_INTERNAL_CATCH - #define JSON_INTERNAL_CATCH JSON_CATCH_USER -#endif -#if defined(JSON_INTERNAL_CATCH_USER) - #undef JSON_INTERNAL_CATCH - #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER -#endif - -// allow overriding assert -#if !defined(JSON_ASSERT) - #include <cassert> // assert - #define JSON_ASSERT(x) assert(x) -#endif - -// allow to access some private functions (needed by the test suite) -#if defined(JSON_TESTS_PRIVATE) - #define JSON_PRIVATE_UNLESS_TESTED public -#else - #define JSON_PRIVATE_UNLESS_TESTED private -#endif - -/*! -@brief macro to briefly define a mapping between an enum and JSON -@def NLOHMANN_JSON_SERIALIZE_ENUM -@since version 3.4.0 -*/ -#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...) \ - template<typename BasicJsonType> \ - inline void to_json(BasicJsonType& j, const ENUM_TYPE& e) \ - { \ - static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \ - static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \ - auto it = std::find_if(std::begin(m), std::end(m), \ - [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \ - { \ - return ej_pair.first == e; \ - }); \ - j = ((it != std::end(m)) ? it : std::begin(m))->second; \ - } \ - template<typename BasicJsonType> \ - inline void from_json(const BasicJsonType& j, ENUM_TYPE& e) \ - { \ - static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!"); \ - static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__; \ - auto it = std::find_if(std::begin(m), std::end(m), \ - [&j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \ - { \ - return ej_pair.second == j; \ - }); \ - e = ((it != std::end(m)) ? it : std::begin(m))->first; \ - } - -// Ugly macros to avoid uglier copy-paste when specializing basic_json. They -// may be removed in the future once the class is split. - -#define NLOHMANN_BASIC_JSON_TPL_DECLARATION \ - template<template<typename U, typename V, typename... Args> class ObjectType, \ - template<typename U, typename... Args> class ArrayType, \ - class StringType, class BooleanType, class NumberIntegerType, \ - class NumberUnsignedType, class NumberFloatType, \ - template<typename U> class AllocatorType, \ - template<typename T, typename SFINAE> class JSONSerializer, \ - class BinaryType> - -#define NLOHMANN_BASIC_JSON_TPL \ - basic_json<ObjectType, ArrayType, StringType, BooleanType, \ - NumberIntegerType, NumberUnsignedType, NumberFloatType, \ - AllocatorType, JSONSerializer, BinaryType> - -// Macros to simplify conversion from/to types - -#define NLOHMANN_JSON_EXPAND( x ) x -#define NLOHMANN_JSON_GET_MACRO(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, _61, _62, _63, _64, NAME,...) NAME -#define NLOHMANN_JSON_PASTE(...)
NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_GET_MACRO(__VA_ARGS__, \ - NLOHMANN_JSON_PASTE64, \ - NLOHMANN_JSON_PASTE63, \ - NLOHMANN_JSON_PASTE62, \ - NLOHMANN_JSON_PASTE61, \ - NLOHMANN_JSON_PASTE60, \ - NLOHMANN_JSON_PASTE59, \ - NLOHMANN_JSON_PASTE58, \ - NLOHMANN_JSON_PASTE57, \ - NLOHMANN_JSON_PASTE56, \ - NLOHMANN_JSON_PASTE55, \ - NLOHMANN_JSON_PASTE54, \ - NLOHMANN_JSON_PASTE53, \ - NLOHMANN_JSON_PASTE52, \ - NLOHMANN_JSON_PASTE51, \ - NLOHMANN_JSON_PASTE50, \ - NLOHMANN_JSON_PASTE49, \ - NLOHMANN_JSON_PASTE48, \ - NLOHMANN_JSON_PASTE47, \ - NLOHMANN_JSON_PASTE46, \ - NLOHMANN_JSON_PASTE45, \ - NLOHMANN_JSON_PASTE44, \ - NLOHMANN_JSON_PASTE43, \ - NLOHMANN_JSON_PASTE42, \ - NLOHMANN_JSON_PASTE41, \ - NLOHMANN_JSON_PASTE40, \ - NLOHMANN_JSON_PASTE39, \ - NLOHMANN_JSON_PASTE38, \ - NLOHMANN_JSON_PASTE37, \ - NLOHMANN_JSON_PASTE36, \ - NLOHMANN_JSON_PASTE35, \ - NLOHMANN_JSON_PASTE34, \ - NLOHMANN_JSON_PASTE33, \ - NLOHMANN_JSON_PASTE32, \ - NLOHMANN_JSON_PASTE31, \ - NLOHMANN_JSON_PASTE30, \ - NLOHMANN_JSON_PASTE29, \ - NLOHMANN_JSON_PASTE28, \ - NLOHMANN_JSON_PASTE27, \ - NLOHMANN_JSON_PASTE26, \ - NLOHMANN_JSON_PASTE25, \ - NLOHMANN_JSON_PASTE24, \ - NLOHMANN_JSON_PASTE23, \ - NLOHMANN_JSON_PASTE22, \ - NLOHMANN_JSON_PASTE21, \ - NLOHMANN_JSON_PASTE20, \ - NLOHMANN_JSON_PASTE19, \ - NLOHMANN_JSON_PASTE18, \ - NLOHMANN_JSON_PASTE17, \ - NLOHMANN_JSON_PASTE16, \ - NLOHMANN_JSON_PASTE15, \ - NLOHMANN_JSON_PASTE14, \ - NLOHMANN_JSON_PASTE13, \ - NLOHMANN_JSON_PASTE12, \ - NLOHMANN_JSON_PASTE11, \ - NLOHMANN_JSON_PASTE10, \ - NLOHMANN_JSON_PASTE9, \ - NLOHMANN_JSON_PASTE8, \ - NLOHMANN_JSON_PASTE7, \ - NLOHMANN_JSON_PASTE6, \ - NLOHMANN_JSON_PASTE5, \ - NLOHMANN_JSON_PASTE4, \ - NLOHMANN_JSON_PASTE3, \ - NLOHMANN_JSON_PASTE2, \ - NLOHMANN_JSON_PASTE1)(__VA_ARGS__)) -#define NLOHMANN_JSON_PASTE2(func, v1) func(v1) -#define NLOHMANN_JSON_PASTE3(func, v1, v2) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE2(func, v2) -#define NLOHMANN_JSON_PASTE4(func, v1, v2, v3) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE3(func, v2, v3) -#define NLOHMANN_JSON_PASTE5(func, v1, v2, v3, v4) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE4(func, v2, v3, v4) -#define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5) -#define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6) -#define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7) -#define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8) -#define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v6, v7, v8, v9) -#define NLOHMANN_JSON_PASTE11(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE10(func, v2, v3, v4, v5, v6, v7, v8, v9, v10) -#define NLOHMANN_JSON_PASTE12(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE11(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11) -#define NLOHMANN_JSON_PASTE13(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE12(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12) -#define NLOHMANN_JSON_PASTE14(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, 
v13) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE13(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13) -#define NLOHMANN_JSON_PASTE15(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE14(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14) -#define NLOHMANN_JSON_PASTE16(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE15(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15) -#define NLOHMANN_JSON_PASTE17(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE16(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16) -#define NLOHMANN_JSON_PASTE18(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE17(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17) -#define NLOHMANN_JSON_PASTE19(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE18(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18) -#define NLOHMANN_JSON_PASTE20(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE19(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19) -#define NLOHMANN_JSON_PASTE21(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE20(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20) -#define NLOHMANN_JSON_PASTE22(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE21(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21) -#define NLOHMANN_JSON_PASTE23(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE22(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22) -#define NLOHMANN_JSON_PASTE24(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE23(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23) -#define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) -#define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) -#define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) 
NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) -#define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) -#define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) -#define NLOHMANN_JSON_PASTE30(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE29(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29) -#define NLOHMANN_JSON_PASTE31(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE30(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30) -#define NLOHMANN_JSON_PASTE32(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE31(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31) -#define NLOHMANN_JSON_PASTE33(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE32(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32) -#define NLOHMANN_JSON_PASTE34(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE33(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33) -#define NLOHMANN_JSON_PASTE35(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE34(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34) -#define NLOHMANN_JSON_PASTE36(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE35(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, 
v26, v27, v28, v29, v30, v31, v32, v33, v34, v35) -#define NLOHMANN_JSON_PASTE37(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE36(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36) -#define NLOHMANN_JSON_PASTE38(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE37(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37) -#define NLOHMANN_JSON_PASTE39(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE38(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38) -#define NLOHMANN_JSON_PASTE40(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE39(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39) -#define NLOHMANN_JSON_PASTE41(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE40(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40) -#define NLOHMANN_JSON_PASTE42(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE41(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41) -#define NLOHMANN_JSON_PASTE43(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE42(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42) -#define NLOHMANN_JSON_PASTE44(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) 
NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE43(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43) -#define NLOHMANN_JSON_PASTE45(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE44(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44) -#define NLOHMANN_JSON_PASTE46(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE45(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45) -#define NLOHMANN_JSON_PASTE47(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE46(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46) -#define NLOHMANN_JSON_PASTE48(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE47(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47) -#define NLOHMANN_JSON_PASTE49(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE48(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48) -#define NLOHMANN_JSON_PASTE50(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE49(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49) -#define NLOHMANN_JSON_PASTE51(func, v1, v2, v3, v4, v5, 
v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE50(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50) -#define NLOHMANN_JSON_PASTE52(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE51(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51) -#define NLOHMANN_JSON_PASTE53(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE52(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52) -#define NLOHMANN_JSON_PASTE54(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE53(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53) -#define NLOHMANN_JSON_PASTE55(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE54(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54) -#define NLOHMANN_JSON_PASTE56(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE55(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55) -#define 
NLOHMANN_JSON_PASTE57(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE56(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56) -#define NLOHMANN_JSON_PASTE58(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE57(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57) -#define NLOHMANN_JSON_PASTE59(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE58(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58) -#define NLOHMANN_JSON_PASTE60(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE59(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59) -#define NLOHMANN_JSON_PASTE61(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE60(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60) -#define NLOHMANN_JSON_PASTE62(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, 
v54, v55, v56, v57, v58, v59, v60, v61) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE61(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61) -#define NLOHMANN_JSON_PASTE63(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE62(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62) -#define NLOHMANN_JSON_PASTE64(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE63(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49, v50, v51, v52, v53, v54, v55, v56, v57, v58, v59, v60, v61, v62, v63) - -#define NLOHMANN_JSON_TO(v1) nlohmann_json_j[#v1] = nlohmann_json_t.v1; -#define NLOHMANN_JSON_FROM(v1) nlohmann_json_j.at(#v1).get_to(nlohmann_json_t.v1); -#define NLOHMANN_JSON_FROM_WITH_DEFAULT(v1) nlohmann_json_t.v1 = nlohmann_json_j.value(#v1, nlohmann_json_default_obj.v1); - -/*! -@brief macro -@def NLOHMANN_DEFINE_TYPE_INTRUSIVE -@since version 3.9.0 -*/ -#define NLOHMANN_DEFINE_TYPE_INTRUSIVE(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } - -#define NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Type, ...) \ - friend void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - friend void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { Type nlohmann_json_default_obj; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) } - -/*! -@brief macro -@def NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE -@since version 3.9.0 -*/ -#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Type, ...) \ - inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \ - inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM, __VA_ARGS__)) } - -#define NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(Type, ...) 
\
-    inline void to_json(nlohmann::json& nlohmann_json_j, const Type& nlohmann_json_t) { NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_TO, __VA_ARGS__)) } \
-    inline void from_json(const nlohmann::json& nlohmann_json_j, Type& nlohmann_json_t) { Type nlohmann_json_default_obj; NLOHMANN_JSON_EXPAND(NLOHMANN_JSON_PASTE(NLOHMANN_JSON_FROM_WITH_DEFAULT, __VA_ARGS__)) }
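For orientation, this is how the NLOHMANN_DEFINE_TYPE_* macros above are typically used; a minimal sketch, with `Person` as a hypothetical example type (not part of the deleted file):

    #include <nlohmann/json.hpp>
    #include <string>

    struct Person
    {
        std::string name;
        int age = 0;

        // declares friend to_json/from_json, so private members would work too
        NLOHMANN_DEFINE_TYPE_INTRUSIVE(Person, name, age)
    };

    // round trip:
    //   nlohmann::json j = Person{"Ada", 36};   // -> {"age":36,"name":"Ada"}
    //   Person p = j.get<Person>();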
-
-
-// inspired from https://stackoverflow.com/a/26745591
-// allows to call any std function as if (e.g. with begin):
-// using std::begin; begin(x);
-//
-// it allows using the detected idiom to retrieve the return type
-// of such an expression
-#define NLOHMANN_CAN_CALL_STD_FUNC_IMPL(std_name) \
-    namespace detail { \
-    using std::std_name; \
-    \
-    template<typename... T> \
-    using result_of_##std_name = decltype(std_name(std::declval<T>()...)); \
-    } \
-    \
-    namespace detail2 { \
-    struct std_name##_tag \
-    { \
-    }; \
-    \
-    template<typename... T> \
-    std_name##_tag std_name(T&&...); \
-    \
-    template<typename... T> \
-    using result_of_##std_name = decltype(std_name(std::declval<T>()...)); \
-    \
-    template<typename... T> \
-    struct would_call_std_##std_name \
-    { \
-        static constexpr auto const value = ::nlohmann::detail:: \
-                is_detected_exact<detail::result_of_##std_name<T...>, \
-                result_of_##std_name, T...>::value; \
-    }; \
-    } /* namespace detail2 */ \
-    \
-    template<typename... T> \
-    struct would_call_std_##std_name : detail2::would_call_std_##std_name<T...> \
-    { \
-    }
-
-#ifndef JSON_USE_IMPLICIT_CONVERSIONS
-    #define JSON_USE_IMPLICIT_CONVERSIONS 1
-#endif
-
-#if JSON_USE_IMPLICIT_CONVERSIONS
-    #define JSON_EXPLICIT
-#else
-    #define JSON_EXPLICIT explicit
-#endif
-
-#ifndef JSON_DISABLE_ENUM_SERIALIZATION
-    #define JSON_DISABLE_ENUM_SERIALIZATION 0
-#endif
-
-#ifndef JSON_USE_GLOBAL_UDLS
-    #define JSON_USE_GLOBAL_UDLS 1
-#endif
-
-#if JSON_HAS_THREE_WAY_COMPARISON
-    #include <compare> // partial_ordering
-#endif
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-namespace detail
-{
-
-///////////////////////////
-// JSON type enumeration //
-///////////////////////////
-
-/*!
-@brief the JSON type enumeration
-
-This enumeration collects the different JSON types. It is internally used to
-distinguish the stored values, and the functions @ref basic_json::is_null(),
-@ref basic_json::is_object(), @ref basic_json::is_array(),
-@ref basic_json::is_string(), @ref basic_json::is_boolean(),
-@ref basic_json::is_number() (with @ref basic_json::is_number_integer(),
-@ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()),
-@ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and
-@ref basic_json::is_structured() rely on it.
-
-@note There are three enumeration entries (number_integer, number_unsigned, and
-number_float), because the library distinguishes these three types for numbers:
-@ref basic_json::number_unsigned_t is used for unsigned integers,
-@ref basic_json::number_integer_t is used for signed integers, and
-@ref basic_json::number_float_t is used for floating-point numbers or to
-approximate integers which do not fit in the limits of their respective type.
-
-@sa see @ref basic_json::basic_json(const value_t value_type) -- create a JSON
-value with the default value for a given type
-
-@since version 1.0.0
-*/
-enum class value_t : std::uint8_t
-{
-    null,             ///< null value
-    object,           ///< object (unordered set of name/value pairs)
-    array,            ///< array (ordered collection of values)
-    string,           ///< string value
-    boolean,          ///< boolean value
-    number_integer,   ///< number value (signed integer)
-    number_unsigned,  ///< number value (unsigned integer)
-    number_float,     ///< number value (floating-point)
-    binary,           ///< binary array (ordered collection of bytes)
-    discarded         ///< discarded by the parser callback function
-};
-
-/*!
-@brief comparison operator for JSON types
-
-Returns an ordering that is similar to Python:
-- order: null < boolean < number < object < array < string < binary
-- furthermore, each type is not smaller than itself
-- discarded values are not comparable
-- binary is represented as a b"" string in python and directly comparable to a
-  string; however, making a binary array directly comparable with a string would
-  be surprising behavior in a JSON file.
-
-@since version 1.0.0
-*/
-#if JSON_HAS_THREE_WAY_COMPARISON
-    inline std::partial_ordering operator<=>(const value_t lhs, const value_t rhs) noexcept // *NOPAD*
-#else
-    inline bool operator<(const value_t lhs, const value_t rhs) noexcept
-#endif
-{
-    static constexpr std::array<std::uint8_t, 9> order = {{
-            0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */,
-            1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */,
-            6 /* binary */
-        }
-    };
-
-    const auto l_index = static_cast<std::size_t>(lhs);
-    const auto r_index = static_cast<std::size_t>(rhs);
-#if JSON_HAS_THREE_WAY_COMPARISON
-    if (l_index < order.size() && r_index < order.size())
-    {
-        return order[l_index] <=> order[r_index]; // *NOPAD*
-    }
-    return std::partial_ordering::unordered;
-#else
-    return l_index < order.size() && r_index < order.size() && order[l_index] < order[r_index];
-#endif
-}
-
-// GCC selects the built-in operator< over an operator rewritten from
-// a user-defined spaceship operator
-// Clang, MSVC, and ICC select the rewritten candidate
-// (see GCC bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105200)
-#if JSON_HAS_THREE_WAY_COMPARISON && defined(__GNUC__)
-inline bool operator<(const value_t lhs, const value_t rhs) noexcept
-{
-    return std::is_lt(lhs <=> rhs); // *NOPAD*
-}
-#endif
-
-} // namespace detail
-NLOHMANN_JSON_NAMESPACE_END
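As a quick check on the comparator above, a sketch that assumes the amalgamated header is included and pokes at the internal detail namespace, so it is illustrative only:

    #include <cassert>
    #include <nlohmann/json.hpp>

    int main()
    {
        using nlohmann::detail::value_t;
        assert(value_t::null < value_t::boolean);                    // null sorts first
        assert(value_t::boolean < value_t::number_integer);          // then boolean, then numbers
        assert(!(value_t::number_integer < value_t::number_float));  // all number kinds share rank 2
        assert(value_t::string < value_t::binary);                   // binary sorts last
    }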
-
-// #include
-//     __ _____ _____ _____
-//  __|  |   __|     |   | |  JSON for Modern C++
-// |  |  |__   |  |  | | | |  version 3.11.2
-// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann
-// SPDX-License-Identifier: MIT
-
-
-
-// #include
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-namespace detail
-{
-
-/*!
-@brief replace all occurrences of a substring by another string
-
-@param[in,out] s  the string to manipulate; changed so that all
-               occurrences of @a f are replaced with @a t
-@param[in]     f  the substring to replace with @a t
-@param[in]     t  the string to replace @a f
-
-@pre The search string @a f must not be empty. **This precondition is
-enforced with an assertion.**
-
-@since version 2.0.0
-*/
-template<typename StringType>
-inline void replace_substring(StringType& s, const StringType& f,
-                              const StringType& t)
-{
-    JSON_ASSERT(!f.empty());
-    for (auto pos = s.find(f);            // find first occurrence of f
-            pos != StringType::npos;      // make sure f was found
-            s.replace(pos, f.size(), t),  // replace with t, and
-            pos = s.find(f, pos + t.size()))        // find next occurrence of f
-    {}
-}
-
-/*!
- * @brief string escaping as described in RFC 6901 (Sect. 4)
- * @param[in] s string to escape
- * @return    escaped string
- *
- * Note the order of escaping "~" to "~0" and "/" to "~1" is important.
- */
-template<typename StringType>
-inline StringType escape(StringType s)
-{
-    replace_substring(s, StringType{"~"}, StringType{"~0"});
-    replace_substring(s, StringType{"/"}, StringType{"~1"});
-    return s;
-}
-
-/*!
- * @brief string unescaping as described in RFC 6901 (Sect. 4)
- * @param[in] s string to unescape
- * @return    unescaped string
- *
- * Note the order of escaping "~1" to "/" and "~0" to "~" is important.
- */
-template<typename StringType>
-static void unescape(StringType& s)
-{
-    replace_substring(s, StringType{"~1"}, StringType{"/"});
-    replace_substring(s, StringType{"~0"}, StringType{"~"});
-}
-
-} // namespace detail
-NLOHMANN_JSON_NAMESPACE_END
-
-// #include
-//     __ _____ _____ _____
-//  __|  |   __|     |   | |  JSON for Modern C++
-// |  |  |__   |  |  | | | |  version 3.11.2
-// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann
-// SPDX-License-Identifier: MIT
-
-
-
-#include <cstddef> // size_t
-
-// #include
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-namespace detail
-{
-
-/// struct to capture the start position of the current token
-struct position_t
-{
-    /// the total number of characters read
-    std::size_t chars_read_total = 0;
-    /// the number of characters read in the current line
-    std::size_t chars_read_current_line = 0;
-    /// the number of lines read
-    std::size_t lines_read = 0;
-
-    /// conversion to size_t to preserve SAX interface
-    constexpr operator size_t() const
-    {
-        return chars_read_total;
-    }
-};
-
-} // namespace detail
-NLOHMANN_JSON_NAMESPACE_END
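Worked example of the RFC 6901 escaping implemented above; the replacement order matters, as the comments in the code point out:

    // escape("a/b~c"):
    //   "~" -> "~0" first : "a/b~0c"
    //   "/" -> "~1" then  : "a~1b~0c"
    // unescape("a~1b~0c") reverses it:
    //   "~1" -> "/" first : "a/b~0c"
    //   "~0" -> "~" then  : "a/b~c"
    // Unescaping "~01" (the escaped form of "~1") in the opposite order would
    // first yield "~1" and then "/", i.e. the wrong result.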
-
-// #include
-
-// #include
-//     __ _____ _____ _____
-//  __|  |   __|     |   | |  JSON for Modern C++
-// |  |  |__   |  |  | | | |  version 3.11.2
-// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann
-// SPDX-FileCopyrightText: 2018 The Abseil Authors
-// SPDX-License-Identifier: MIT
-
-
-
-#include <array> // array
-#include <cstddef> // size_t
-#include <type_traits> // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type
-#include <utility> // index_sequence, make_index_sequence, index_sequence_for
-
-// #include
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-namespace detail
-{
-
-template<typename T>
-using uncvref_t = typename std::remove_cv<typename std::remove_reference<T>::type>::type;
-
-#ifdef JSON_HAS_CPP_14
-
-// the following utilities are natively available in C++14
-using std::enable_if_t;
-using std::index_sequence;
-using std::make_index_sequence;
-using std::index_sequence_for;
-
-#else
-
-// alias templates to reduce boilerplate
-template<bool B, typename T = void>
-using enable_if_t = typename std::enable_if<B, T>::type;
-
-// The following code is taken from https://github.com/abseil/abseil-cpp/blob/10cb35e459f5ecca5b2ff107635da0bfa41011b4/absl/utility/utility.h
-// which is part of Google Abseil (https://github.com/abseil/abseil-cpp), licensed under the Apache License 2.0.
-
-//// START OF CODE FROM GOOGLE ABSEIL
-
-// integer_sequence
-//
-// Class template representing a compile-time integer sequence. An instantiation
-// of `integer_sequence<T, Ints...>` has a sequence of integers encoded in its
-// type through its template arguments (which is a common need when
-// working with C++11 variadic templates). `absl::integer_sequence` is designed
-// to be a drop-in replacement for C++14's `std::integer_sequence`.
-//
-// Example:
-//
-//   template< class T, T... Ints >
-//   void user_function(integer_sequence<T, Ints...>);
-//
-//   int main()
-//   {
-//     // user_function's `T` will be deduced to `int` and `Ints...`
-//     // will be deduced to `0, 1, 2, 3, 4`.
-//     user_function(make_integer_sequence<int, 5>());
-//   }
-template <typename T, T... Ints>
-struct integer_sequence
-{
-    using value_type = T;
-    static constexpr std::size_t size() noexcept
-    {
-        return sizeof...(Ints);
-    }
-};
-
-// index_sequence
-//
-// A helper template for an `integer_sequence` of `size_t`,
-// `absl::index_sequence` is designed to be a drop-in replacement for C++14's
-// `std::index_sequence`.
-template <size_t... Ints>
-using index_sequence = integer_sequence<size_t, Ints...>;
-
-namespace utility_internal
-{
-
-template <typename Seq, size_t SeqSize, size_t Rem>
-struct Extend;
-
-// Note that SeqSize == sizeof...(Ints). It's passed explicitly for efficiency.
-template <typename T, T... Ints, size_t SeqSize>
-struct Extend<integer_sequence<T, Ints...>, SeqSize, 0>
-{
-    using type = integer_sequence < T, Ints..., (Ints + SeqSize)... >;
-};
-
-template <typename T, T... Ints, size_t SeqSize>
-struct Extend<integer_sequence<T, Ints...>, SeqSize, 1>
-{
-    using type = integer_sequence < T, Ints..., (Ints + SeqSize)..., 2 * SeqSize >;
-};
-
-// Recursion helper for 'make_integer_sequence<T, N>'.
-// 'Gen<T, N>::type' is an alias for 'integer_sequence<T, 0, 1, ... N-1>'.
-template <typename T, size_t N>
-struct Gen
-{
-    using type =
-        typename Extend < typename Gen < T, N / 2 >::type, N / 2, N % 2 >::type;
-};
-
-template <typename T>
-struct Gen<T, 0>
-{
-    using type = integer_sequence<T>;
-};
-
-} // namespace utility_internal
-
-// Compile-time sequences of integers
-
-// make_integer_sequence
-//
-// This template alias is equivalent to
-// `integer_sequence<int, 0, 1, ..., N-1>`, and is designed to be a drop-in
-// replacement for C++14's `std::make_integer_sequence`.
-template <typename T, T N>
-using make_integer_sequence = typename utility_internal::Gen<T, N>::type;
-
-// make_index_sequence
-//
-// This template alias is equivalent to `index_sequence<0, 1, ..., N-1>`,
-// and is designed to be a drop-in replacement for C++14's
-// `std::make_index_sequence`.
-template <size_t N>
-using make_index_sequence = make_integer_sequence<size_t, N>;
-
-// index_sequence_for
-//
-// Converts a typename pack into an index sequence of the same length, and
-// is designed to be a drop-in replacement for C++14's
-// `std::index_sequence_for()`
-template <typename... Ts>
-using index_sequence_for = make_index_sequence<sizeof...(Ts)>;
-
-//// END OF CODE FROM GOOGLE ABSEIL
-
-#endif
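A small sketch of what this backfill provides (shown with the std:: spellings for runnability on C++14; on C++11 the detail:: aliases above play the same role, and `print_tuple` is a hypothetical helper):

    #include <cstddef>
    #include <iostream>
    #include <tuple>
    #include <utility>

    // make_index_sequence<3> is integer_sequence<size_t, 0, 1, 2>; the classic
    // use is expanding a tuple into a parameter pack:
    template<typename Tuple, std::size_t... Is>
    void print_tuple(const Tuple& t, std::index_sequence<Is...>)
    {
        // pre-C++17 pack expansion via a dummy initializer list
        using expand = int[];
        (void)expand{0, ((std::cout << std::get<Is>(t) << ' '), 0)...};
    }

    int main()
    {
        auto t = std::make_tuple(1, 2.5, "three");
        print_tuple(t, std::make_index_sequence<std::tuple_size<decltype(t)>::value>{});
    }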
-
-// dispatch utility (taken from ranges-v3)
-template<std::size_t N> struct priority_tag : priority_tag < N - 1 > {};
-template<> struct priority_tag<0> {};
-
-// taken from ranges-v3
-template<typename T>
-struct static_const
-{
-    static JSON_INLINE_VARIABLE constexpr T value{};
-};
-
-#ifndef JSON_HAS_CPP_17
-    template<typename T>
-    constexpr T static_const<T>::value;
-#endif
-
-template<typename T, typename... Args>
-inline constexpr std::array<T, sizeof...(Args)> make_array(Args&& ... args)
-{
-    return std::array<T, sizeof...(Args)> {{static_cast<T>(std::forward<Args>(args))...}};
-}
-
-} // namespace detail
-NLOHMANN_JSON_NAMESPACE_END
-
-// #include
-//     __ _____ _____ _____
-//  __|  |   __|     |   | |  JSON for Modern C++
-// |  |  |__   |  |  | | | |  version 3.11.2
-// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann
-// SPDX-License-Identifier: MIT
-
-
-
-#include <limits> // numeric_limits
-#include <type_traits> // false_type, is_constructible, is_integral, is_same, true_type
-#include <utility> // declval
-#include <tuple> // tuple
-
-// #include
-//     __ _____ _____ _____
-//  __|  |   __|     |   | |  JSON for Modern C++
-// |  |  |__   |  |  | | | |  version 3.11.2
-// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann
-// SPDX-License-Identifier: MIT
-
-
-
-#include <iterator> // random_access_iterator_tag
-
-// #include
-
-// #include
-
-// #include
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-namespace detail
-{
-
-template<typename It, typename = void>
-struct iterator_types {};
-
-template<typename It>
-struct iterator_types <
-    It,
-    void_t<typename It::difference_type, typename It::value_type, typename It::pointer,
-    typename It::reference, typename It::iterator_category >>
-{
-    using difference_type = typename It::difference_type;
-    using value_type = typename It::value_type;
-    using pointer = typename It::pointer;
-    using reference = typename It::reference;
-    using iterator_category = typename It::iterator_category;
-};
-
-// This is required as some compilers implement std::iterator_traits in a way that
-// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341.
-template<typename T, typename = void>
-struct iterator_traits
-{
-};
-
-template<typename T>
-struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
-    : iterator_types<T>
-{
-};
-
-template<typename T>
-struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
-{
-    using iterator_category = std::random_access_iterator_tag;
-    using value_type = T;
-    using difference_type = ptrdiff_t;
-    using pointer = T*;
-    using reference = T&;
-};
-
-} // namespace detail
-NLOHMANN_JSON_NAMESPACE_END
-
-// #include
-
-// #include
-//     __ _____ _____ _____
-//  __|  |   __|     |   | |  JSON for Modern C++
-// |  |  |__   |  |  | | | |  version 3.11.2
-// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann
-// SPDX-License-Identifier: MIT
-
-
-
-// #include
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-
-NLOHMANN_CAN_CALL_STD_FUNC_IMPL(begin);
-
-NLOHMANN_JSON_NAMESPACE_END
-
-// #include
-//     __ _____ _____ _____
-//  __|  |   __|     |   | |  JSON for Modern C++
-// |  |  |__   |  |  | | | |  version 3.11.2
-// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann
-// SPDX-License-Identifier: MIT
-
-
-
-// #include
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-
-NLOHMANN_CAN_CALL_STD_FUNC_IMPL(end);
-
-NLOHMANN_JSON_NAMESPACE_END
-
-// #include
-
-// #include
-
-// #include
-//     __ _____ _____ _____
-//  __|  |   __|     |   | |  JSON for Modern C++
-// |  |  |__   |  |  | | | |  version 3.11.2
-// |_____|_____|_____|_|___|  https://github.com/nlohmann/json
-//
-// SPDX-FileCopyrightText: 2013-2022 Niels Lohmann
-// SPDX-License-Identifier: MIT
-
-#ifndef INCLUDE_NLOHMANN_JSON_FWD_HPP_
-    #define INCLUDE_NLOHMANN_JSON_FWD_HPP_
-
-    #include <cstdint> // int64_t, uint64_t
-    #include <map> // map
-    #include <memory> // allocator
-    #include <string> // string
-    #include <vector> // vector
-
-    // #include
-
-
-    /*!
-    @brief namespace for Niels Lohmann
-    @see https://github.com/nlohmann
-    @since version 1.0.0
-    */
-    NLOHMANN_JSON_NAMESPACE_BEGIN
-
-    /*!
-    @brief default JSONSerializer template argument
-
-    This serializer ignores the template arguments and uses ADL
-    ([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl))
-    for serialization.
-    */
-    template<typename T = void, typename SFINAE = void>
-    struct adl_serializer;
-
-    /// a class to store JSON values
-    /// @sa https://json.nlohmann.me/api/basic_json/
-    template<template<typename U, typename V, typename... Args> class ObjectType =
-             std::map,
-             template<typename U, typename... Args> class ArrayType = std::vector,
-             class StringType = std::string, class BooleanType = bool,
-             class NumberIntegerType = std::int64_t,
-             class NumberUnsignedType = std::uint64_t,
-             class NumberFloatType = double,
-             template<typename U> class AllocatorType = std::allocator,
-             template<typename T, typename SFINAE = void> class JSONSerializer =
-             adl_serializer,
-             class BinaryType = std::vector<std::uint8_t>>
-    class basic_json;
-
-    /// @brief JSON Pointer defines a string syntax for identifying a specific value within a JSON document
-    /// @sa https://json.nlohmann.me/api/json_pointer/
-    template<typename RefStringType>
-    class json_pointer;
-
-    /*!
-    @brief default specialization
-    @sa https://json.nlohmann.me/api/json/
-    */
-    using json = basic_json<>;
-
-    /// @brief a minimal map-like container that preserves insertion order
-    /// @sa https://json.nlohmann.me/api/ordered_map/
-    template<class Key, class T, class IgnoredLess = std::less<Key>,
-             class Allocator = std::allocator<std::pair<const Key, T>>>
-    struct ordered_map;
-
-    /// @brief specialization that maintains the insertion order of object keys
-    /// @sa https://json.nlohmann.me/api/ordered_json/
-    using ordered_json = basic_json<nlohmann::ordered_map>;
-
-    NLOHMANN_JSON_NAMESPACE_END
-
-#endif // INCLUDE_NLOHMANN_JSON_FWD_HPP_
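A side note on the two specializations declared above; the observable difference, sketched:

    #include <iostream>
    #include <nlohmann/json.hpp>

    int main()
    {
        // nlohmann::json stores objects in std::map, so keys come back sorted:
        nlohmann::json j = {{"b", 1}, {"a", 2}};
        std::cout << j.dump() << '\n';   // {"a":2,"b":1}

        // nlohmann::ordered_json uses nlohmann::ordered_map and keeps insertion order:
        nlohmann::ordered_json oj = {{"b", 1}, {"a", 2}};
        std::cout << oj.dump() << '\n';  // {"b":1,"a":2}
    }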
-
-
-NLOHMANN_JSON_NAMESPACE_BEGIN
-/*!
-@brief detail namespace with internal helper functions
-
-This namespace collects functions that should not be exposed,
-implementations of some @ref basic_json methods, and meta-programming helpers.
-
-@since version 2.1.0
-*/
-namespace detail
-{
-
-/////////////
-// helpers //
-/////////////
-
-// Note to maintainers:
-//
-// Every trait in this file expects a non CV-qualified type.
-// The only exceptions are in the 'aliases for detected' section
-// (i.e. those of the form: decltype(T::member_function(std::declval<T>())))
-//
-// In this case, T has to be properly CV-qualified to constraint the function arguments
-// (e.g. to_json(BasicJsonType&, const T&))
-
-template<typename> struct is_basic_json : std::false_type {};
-
-NLOHMANN_BASIC_JSON_TPL_DECLARATION
-struct is_basic_json<NLOHMANN_BASIC_JSON_TPL> : std::true_type {};
-
-// used by exceptions create() member functions
-// true_type for pointer to possibly cv-qualified basic_json or std::nullptr_t
-// false_type otherwise
-template<typename BasicJsonContext>
-struct is_basic_json_context :
-    std::integral_constant < bool,
-    is_basic_json<typename std::remove_cv<typename std::remove_pointer<BasicJsonContext>::type>::type>::value
-    || std::is_same<BasicJsonContext, std::nullptr_t>::value >
-{};
-
-//////////////////////
-// json_ref helpers //
-//////////////////////
-
-template<typename>
-class json_ref;
-
-template<typename>
-struct is_json_ref : std::false_type {};
-
-template<typename T>
-struct is_json_ref<json_ref<T>> : std::true_type {};
-
-//////////////////////////
-// aliases for detected //
-//////////////////////////
-
-template<typename T>
-using mapped_type_t = typename T::mapped_type;
-
-template<typename T>
-using key_type_t = typename T::key_type;
-
-template<typename T>
-using value_type_t = typename T::value_type;
-
-template<typename T>
-using difference_type_t = typename T::difference_type;
-
-template<typename T>
-using pointer_t = typename T::pointer;
-
-template<typename T>
-using reference_t = typename T::reference;
-
-template<typename T>
-using iterator_category_t = typename T::iterator_category;
-
-template<typename T, typename... Args>
-using to_json_function = decltype(T::to_json(std::declval<Args>()...));
-
-template<typename T, typename... Args>
-using from_json_function = decltype(T::from_json(std::declval<Args>()...));
-
-template<typename T, typename U>
-using get_template_function = decltype(std::declval<T>().template get<U>());
-
-// trait checking if JSONSerializer<T>::from_json(json const&, udt&) exists
-template<typename BasicJsonType, typename T, typename = void>
-struct has_from_json : std::false_type {};
-
-// trait checking if j.get<T> is valid
-// use this trait instead of std::is_constructible or std::is_convertible,
-// both rely on, or make use of implicit conversions, and thus fail when T
-// has several constructors/operator= (see https://github.com/nlohmann/json/issues/958)
-template <typename BasicJsonType, typename T>
-struct is_getable
-{
-    static constexpr bool value = is_detected<get_template_function, const BasicJsonType&, T>::value;
-};
-
-template<typename BasicJsonType, typename T>
-struct has_from_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
-{
-    using serializer = typename BasicJsonType::template json_serializer<T, void>;
-
-    static constexpr bool value =
-        is_detected_exact<void, from_json_function, serializer,
-        const BasicJsonType&, T&>::value;
-};
-
-// This trait checks if JSONSerializer<T>::from_json(json const&) exists
-// this overload is used for non-default-constructible user-defined-types
-template<typename BasicJsonType, typename T, typename = void>
-struct has_non_default_from_json : std::false_type {};
-
-template<typename BasicJsonType, typename T>
-struct has_non_default_from_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
-{
-    using serializer = typename BasicJsonType::template json_serializer<T, void>;
-
-    static constexpr bool value =
-        is_detected_exact<T, from_json_function, serializer,
-        const BasicJsonType&>::value;
-};
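To see what these detection traits report in practice, a sketch with a hypothetical `Point` type; it relies on internal detail traits, so it is illustrative rather than supported API. With only the two-argument ADL from_json defined, the default adl_serializer should make has_from_json true and has_non_default_from_json false:

    #include <nlohmann/json.hpp>

    struct Point { int x = 0; };

    // two-argument form: requires Point to be default-constructible
    void from_json(const nlohmann::json& j, Point& p) { j.at("x").get_to(p.x); }

    static_assert(nlohmann::detail::has_from_json<nlohmann::json, Point>::value,
                  "adl_serializer forwards to the free from_json above");
    static_assert(!nlohmann::detail::has_non_default_from_json<nlohmann::json, Point>::value,
                  "no single-argument from_json returning Point is defined");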
-
-// This trait checks if BasicJsonType::json_serializer<T>::to_json exists
-// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion.
-template<typename BasicJsonType, typename T, typename = void>
-struct has_to_json : std::false_type {};
-
-template<typename BasicJsonType, typename T>
-struct has_to_json < BasicJsonType, T, enable_if_t < !is_basic_json<T>::value >>
-{
-    using serializer = typename BasicJsonType::template json_serializer<T, void>;
-
-    static constexpr bool value =
-        is_detected_exact<void, to_json_function, serializer, BasicJsonType&,
-        T>::value;
-};
-
-template<typename T>
-using detect_key_compare = typename T::key_compare;
-
-template<typename T>
-struct has_key_compare : std::integral_constant<bool, is_detected<detect_key_compare, T>::value> {};
-
-// obtains the actual object key comparator
-template<typename BasicJsonType>
-struct actual_object_comparator
-{
-    using object_t = typename BasicJsonType::object_t;
-    using object_comparator_t = typename BasicJsonType::default_object_comparator_t;
-    using type = typename std::conditional < has_key_compare<object_t>::value,
-          typename object_t::key_compare, object_comparator_t>::type;
-};
-
-template<typename BasicJsonType>
-using actual_object_comparator_t = typename actual_object_comparator<BasicJsonType>::type;
-
-///////////////////
-// is_ functions //
-///////////////////
-
-// https://en.cppreference.com/w/cpp/types/conjunction
-template<class...> struct conjunction : std::true_type { };
-template<class B> struct conjunction<B> : B { };
-template<class B, class... Bn>
-struct conjunction<B, Bn...>
-: std::conditional<static_cast<bool>(B::value), conjunction<Bn...>, B>::type {};
-
-// https://en.cppreference.com/w/cpp/types/negation
-template<class B> struct negation : std::integral_constant < bool, !B::value > { };
-
-// Reimplementation of is_constructible and is_default_constructible, due to them being broken for
-// std::pair and std::tuple until LWG 2367 fix (see https://cplusplus.github.io/LWG/lwg-defects.html#2367).
-// This causes compile errors in e.g. clang 3.5 or gcc 4.9.
-template <typename T>
-struct is_default_constructible : std::is_default_constructible<T> {};
-
-template <typename T1, typename T2>
-struct is_default_constructible<std::pair<T1, T2>>
-            : conjunction<is_default_constructible<T1>, is_default_constructible<T2>> {};
-
-template <typename T1, typename T2>
-struct is_default_constructible<const std::pair<T1, T2>>
-            : conjunction<is_default_constructible<T1>, is_default_constructible<T2>> {};
-
-template <typename... Ts>
-struct is_default_constructible<std::tuple<Ts...>>
-            : conjunction<is_default_constructible<Ts>...> {};
-
-template <typename... Ts>
-struct is_default_constructible<const std::tuple<Ts...>>
-            : conjunction<is_default_constructible<Ts>...> {};
-
-
-template <typename T, typename... Args>
-struct is_constructible : std::is_constructible<T, Args...> {};
-
-template <typename T1, typename T2>
-struct is_constructible<std::pair<T1, T2>> : is_default_constructible<std::pair<T1, T2>> {};
-
-template <typename T1, typename T2>
-struct is_constructible<const std::pair<T1, T2>> : is_default_constructible<const std::pair<T1, T2>> {};
-
-template <typename... Ts>
-struct is_constructible<std::tuple<Ts...>> : is_default_constructible<std::tuple<Ts...>> {};
-
-template <typename... Ts>
-struct is_constructible<const std::tuple<Ts...>> : is_default_constructible<const std::tuple<Ts...>> {};
-
-
-template<typename T, typename = void>
-struct is_iterator_traits : std::false_type {};
-
-template<typename T>
-struct is_iterator_traits<iterator_traits<T>>
-{
-  private:
-    using traits = iterator_traits<T>;
-
-  public:
-    static constexpr auto value =
-        is_detected<value_type_t, traits>::value &&
-        is_detected<difference_type_t, traits>::value &&
-        is_detected<pointer_t, traits>::value &&
-        is_detected<iterator_category_t, traits>::value &&
-        is_detected<reference_t, traits>::value;
-};
-
-template<typename T>
-struct is_range
-{
-  private:
-    using t_ref = typename std::add_lvalue_reference<T>::type;
-
-    using iterator = detected_t<result_of_begin, t_ref>;
-    using sentinel = detected_t<result_of_end, t_ref>;
-
-    // to be 100% correct, it should use https://en.cppreference.com/w/cpp/iterator/input_or_output_iterator
-    // and https://en.cppreference.com/w/cpp/iterator/sentinel_for
-    // but reimplementing these would be too much work, as a lot of other concepts are used underneath
-    static constexpr auto is_iterator_begin =
-        is_iterator_traits<iterator_traits<iterator>>::value;
-
-  public:
-    static constexpr bool value = !std::is_same<iterator, nonesuch>::value && !std::is_same<sentinel, nonesuch>::value && is_iterator_begin;
-};
-
-template<typename R>
-using iterator_t = enable_if_t<is_range<R>::value, result_of_begin<decltype(std::declval<R&>())>>;
-
-template<typename T>
-using range_value_t = value_type_t<iterator_traits<iterator_t<T>>>;
-
-// The following implementation of is_complete_type is taken from
-// https://blogs.msdn.microsoft.com/vcblog/2015/12/02/partial-support-for-expression-sfinae-in-vs-2015-update-1/
-// and is written by Xiang Fan who agreed to using it in this library.
-
-template <typename T, typename = void>
-struct is_complete_type : std::false_type {};
-
-template <typename T>
-struct is_complete_type<T, decltype(void(sizeof(T)))> : std::true_type {};
-
-template<typename BasicJsonType, typename CompatibleObjectType,
-         typename = void>
-struct is_compatible_object_type_impl : std::false_type {};
-
-template<typename BasicJsonType, typename CompatibleObjectType>
-struct is_compatible_object_type_impl <
-    BasicJsonType, CompatibleObjectType,
-    enable_if_t < is_detected<mapped_type_t, CompatibleObjectType>::value&&
-    is_detected<key_type_t, CompatibleObjectType>::value >>
-{
-    using object_t = typename BasicJsonType::object_t;
-
-    // macOS's is_constructible does not play well with nonesuch...
-    static constexpr bool value =
-        is_constructible<typename object_t::key_type,
-        typename CompatibleObjectType::key_type>::value &&
-        is_constructible<typename object_t::mapped_type,
-        typename CompatibleObjectType::mapped_type>::value;
-};
-
-template<typename BasicJsonType, typename CompatibleObjectType>
-struct is_compatible_object_type
-    : is_compatible_object_type_impl<BasicJsonType, CompatibleObjectType> {};
-
-template<typename BasicJsonType, typename ConstructibleObjectType,
-         typename = void>
-struct is_constructible_object_type_impl : std::false_type {};
-
-template<typename BasicJsonType, typename ConstructibleObjectType>
-struct is_constructible_object_type_impl <
-    BasicJsonType, ConstructibleObjectType,
-    enable_if_t < is_detected<mapped_type_t, ConstructibleObjectType>::value&&
-    is_detected<key_type_t, ConstructibleObjectType>::value >>
-{
-    using object_t = typename BasicJsonType::object_t;
-
-    static constexpr bool value =
-        (is_default_constructible<ConstructibleObjectType>::value &&
-         (std::is_move_assignable<ConstructibleObjectType>::value ||
-          std::is_copy_assignable<ConstructibleObjectType>::value) &&
-         (is_constructible<typename ConstructibleObjectType::key_type,
-          typename object_t::key_type>::value &&
-          std::is_same <
-          typename object_t::mapped_type,
-          typename ConstructibleObjectType::mapped_type >::value)) ||
-        (has_from_json<BasicJsonType,
-         typename ConstructibleObjectType::mapped_type>::value ||
-         has_non_default_from_json <
-         BasicJsonType,
-         typename ConstructibleObjectType::mapped_type >::value);
-};
-
-template<typename BasicJsonType, typename ConstructibleObjectType>
-struct is_constructible_object_type
-    : is_constructible_object_type_impl<BasicJsonType,
-      ConstructibleObjectType> {};
-
-template<typename BasicJsonType, typename CompatibleStringType>
-struct is_compatible_string_type
-{
-    static constexpr auto value =
-        is_constructible<typename BasicJsonType::string_t, CompatibleStringType>::value;
-};
-
-template<typename BasicJsonType, typename ConstructibleStringType>
-struct is_constructible_string_type
-{
-    // launder type through decltype() to fix compilation failure on ICPC
-#ifdef __INTEL_COMPILER
-    using laundered_type = decltype(std::declval<ConstructibleStringType>());
-#else
-    using laundered_type = ConstructibleStringType;
-#endif
-
-    static constexpr auto value =
-        conjunction <
-        is_constructible<laundered_type, typename BasicJsonType::string_t>,
-        is_detected_exact<typename BasicJsonType::string_t::value_type,
-        value_type_t, laundered_type >>::value;
-};
-
-template<typename BasicJsonType, typename CompatibleArrayType, typename = void>
-struct is_compatible_array_type_impl : std::false_type {};
-
-template<typename BasicJsonType, typename CompatibleArrayType>
-struct is_compatible_array_type_impl <
-    BasicJsonType, CompatibleArrayType,
-    enable_if_t <
-    is_detected<iterator_t, CompatibleArrayType>::value&&
-    is_iterator_traits<iterator_traits<detected_t<iterator_t, CompatibleArrayType>>>::value&&
-// special case for types like std::filesystem::path whose iterator's value_type are themselves
-// c.f. https://github.com/nlohmann/json/pull/3073
-    !std::is_same<CompatibleArrayType, detected_t<range_value_t, CompatibleArrayType>>::value >>
-{
-    static constexpr bool value =
-        is_constructible<BasicJsonType,
-        range_value_t<CompatibleArrayType>>::value;
-};
-
-template<typename BasicJsonType, typename CompatibleArrayType>
-struct is_compatible_array_type
-    : is_compatible_array_type_impl<BasicJsonType, CompatibleArrayType> {};
-
-template<typename BasicJsonType, typename ConstructibleArrayType, typename = void>
-struct is_constructible_array_type_impl : std::false_type {};
-
-template<typename BasicJsonType, typename ConstructibleArrayType>
-struct is_constructible_array_type_impl <
-    BasicJsonType, ConstructibleArrayType,
-    enable_if_t<std::is_same<ConstructibleArrayType,
-    typename BasicJsonType::value_type>::value >>
-    : std::true_type {};
-
-template<typename BasicJsonType, typename ConstructibleArrayType>
-struct is_constructible_array_type_impl <
-    BasicJsonType, ConstructibleArrayType,
-    enable_if_t < !std::is_same<ConstructibleArrayType,
-    typename BasicJsonType::value_type>::value&&
-    !is_compatible_string_type<BasicJsonType, ConstructibleArrayType>::value&&
-    is_default_constructible<ConstructibleArrayType>::value&&
-(std::is_move_assignable<ConstructibleArrayType>::value ||
- std::is_copy_assignable<ConstructibleArrayType>::value)&&
-is_detected<iterator_t, ConstructibleArrayType>::value&&
-is_iterator_traits<iterator_traits<detected_t<iterator_t, ConstructibleArrayType>>>::value&&
-is_detected<range_value_t, ConstructibleArrayType>::value&&
-// special case for types like std::filesystem::path whose iterator's value_type are themselves
-// c.f. https://github.com/nlohmann/json/pull/3073
-!std::is_same<ConstructibleArrayType, detected_t<range_value_t, ConstructibleArrayType>>::value&&
-        is_complete_type <
-        detected_t<range_value_t, ConstructibleArrayType >>::value >>
-{
-    using value_type = range_value_t<ConstructibleArrayType>;
-
-    static constexpr bool value =
-        std::is_same<value_type,
-        typename BasicJsonType::array_t::value_type>::value ||
-        has_from_json<BasicJsonType,
-        value_type>::value ||
-        has_non_default_from_json <
-        BasicJsonType,
-        value_type >::value;
-};
-
-template<typename BasicJsonType, typename ConstructibleArrayType>
-struct is_constructible_array_type
-    : is_constructible_array_type_impl<BasicJsonType, ConstructibleArrayType> {};
-
-template<typename RealIntegerType, typename CompatibleNumberIntegerType,
-         typename = void>
-struct is_compatible_integer_type_impl : std::false_type {};
-
-template<typename RealIntegerType, typename CompatibleNumberIntegerType>
-struct is_compatible_integer_type_impl <
-    RealIntegerType, CompatibleNumberIntegerType,
-    enable_if_t < std::is_integral<RealIntegerType>::value&&
-    std::is_integral<CompatibleNumberIntegerType>::value&&
-    !std::is_same<bool, CompatibleNumberIntegerType>::value >>
-{
-    // is there an assert somewhere on overflows?
-    using RealLimits = std::numeric_limits<RealIntegerType>;
-    using CompatibleLimits = std::numeric_limits<CompatibleNumberIntegerType>;
-
-    static constexpr auto value =
-        is_constructible<RealIntegerType,
-        CompatibleNumberIntegerType>::value &&
-        CompatibleLimits::is_integer &&
-        RealLimits::is_signed == CompatibleLimits::is_signed;
-};
-
-template<typename RealIntegerType, typename CompatibleNumberIntegerType>
-struct is_compatible_integer_type
-    : is_compatible_integer_type_impl<RealIntegerType,
-      CompatibleNumberIntegerType> {};
-
-template<typename BasicJsonType, typename CompatibleType, typename = void>
-struct is_compatible_type_impl: std::false_type {};
-
-template<typename BasicJsonType, typename CompatibleType>
-struct is_compatible_type_impl <
-    BasicJsonType, CompatibleType,
-    enable_if_t<is_complete_type<CompatibleType>::value >>
-{
-    static constexpr bool value =
-        has_to_json<BasicJsonType, CompatibleType>::value;
-};
-
-template<typename BasicJsonType, typename CompatibleType>
-struct is_compatible_type
-    : is_compatible_type_impl<BasicJsonType, CompatibleType> {};
-
-template<typename T1, typename T2>
-struct is_constructible_tuple : std::false_type {};
-
-template<typename T1, typename... Args>
-struct is_constructible_tuple<T1, std::tuple<Args...>> : conjunction<is_constructible<T1, Args>...> {};
-
-template<typename BasicJsonType, typename T>
-struct is_json_iterator_of : std::false_type {};
-
-template<typename BasicJsonType>
-struct is_json_iterator_of<BasicJsonType, typename BasicJsonType::iterator> : std::true_type {};
-
-template<typename BasicJsonType>
-struct is_json_iterator_of<BasicJsonType, typename BasicJsonType::const_iterator> : std::true_type
-{};
-
-// checks if a given type T is a template specialization of Primary
-template